[{"venue": "ACL", "title": "Data Manipulation: Towards Effective Instance Learning for Neural Dialogue Generation via Learning to Augment and Reweight", "abstract": "Current state-of-the-art neural dialogue models learn from human conversations following the data-driven paradigm. As such, a reliable training corpus is the crux of building a robust and well-behaved dialogue model. However, due to the open-ended nature of human conversations, the quality of user-generated training data varies greatly, and effective training samples are typically insufficient while noisy samples frequently appear. This impedes the learning of those data-driven neural dialogue models. Therefore, effective dialogue learning requires not only more reliable learning samples, but also fewer noisy samples. In this paper, we propose a data manipulation framework to proactively reshape the data distribution towards reliable samples by augmenting and highlighting effective learning samples as well as reducing the effect of inefficient samples simultaneously. In particular, the data manipulation model selectively augments the training samples and assigns an importance weight to each instance to reform the training data. Note that, the proposed data manipulation framework is fully data-driven and learnable. It not only manipulates training samples to optimize the dialogue generation model, but also learns to increase its manipulation skills through gradient descent with validation samples. 
Extensive experiments show that our framework can improve the dialogue generation performance with respect to various automatic evaluation metrics and human judgments.", "doc_id": "206628c44ef09a294a61fc2bd0568d59", "publication_year": 2020, "sentences": ["current state - of - the - art neural dialogue models learn from human conversations following the data - driven paradigm .", "as such , a reliable training corpus is the crux of building a robust and well - behaved dialogue model .", "however , due to the open - ended nature of human conversations , the quality of user - generated training data varies greatly , and effective training samples are typically insufficient while noisy samples frequently appear .", "this impedes the learning of those data - driven neural dialogue models .", "therefore , effective dialogue learning requires not only more reliable learning samples , but also fewer noisy samples .", "in this paper , we propose a data manipulation framework to proactively reshape the data distribution towards reliable samples by augmenting and highlighting effective learning samples as well as reducing the effect of inefficient samples simultaneously .", "in particular , the data manipulation model selectively augments the training samples and assigns an importance weight to each instance to reform the training data .", "note that , the proposed data manipulation framework is fully data - driven and learnable .", "it not only manipulates training samples to optimize the dialogue generation model , but also learns to increase its manipulation skills through gradient descent with validation samples .", "extensive experiments show that our framework can improve the dialogue generation performance with respect to various automatic evaluation metrics and human judgments ."], "events": [{"event_type": "ITT", "arguments": [{"text": "reliable training corpus", "nugget_type": "DST", "argument_type": "Target", "tokens": ["reliable", "training", "corpus"], 
"offsets": [26, 27, 28]}], "trigger": {"text": "crux", "tokens": ["crux"], "offsets": [31]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [116]}, {"text": "data manipulation framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["data", "manipulation", "framework"], "offsets": [119, 120, 121]}, {"text": "proactively reshape", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["proactively", "reshape"], "offsets": [123, 124]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [117]}}, {"event_type": "PUR", "arguments": [{"text": "data distribution", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["data", "distribution"], "offsets": [126, 127]}], "trigger": {"text": "proactively reshape", "tokens": ["proactively", "reshape"], "offsets": [123, 124]}}, {"event_type": "MDS", "arguments": [{"text": "training samples", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["training", "samples"], "offsets": [159, 160]}], "trigger": {"text": "selectively augments", "tokens": ["selectively", "augments"], "offsets": [156, 157]}}, {"event_type": "MDS", "arguments": [{"text": "each instance", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "instance"], "offsets": [167, 168]}, {"text": "reform", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reform"], "offsets": [170]}], "trigger": {"text": "assigns an importance weight", "tokens": ["assigns", "an", "importance", "weight"], "offsets": [162, 163, 164, 165]}}, {"event_type": "PUR", "arguments": [{"text": "training data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["training", "data"], "offsets": [172, 173]}], "trigger": {"text": "reform", "tokens": ["reform"], "offsets": [170]}}, {"event_type": "MDS", "arguments": [{"text": "dialogue generation model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": 
["dialogue", "generation", "model"], "offsets": [200, 201, 202]}, {"text": "training samples", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["training", "samples"], "offsets": [195, 196]}], "trigger": {"text": "optimize", "tokens": ["optimize"], "offsets": [198]}}, {"event_type": "MDS", "arguments": [{"text": "gradient descent", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["gradient", "descent"], "offsets": [213, 214]}, {"text": "manipulation skills", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["manipulation", "skills"], "offsets": [210, 211]}], "trigger": {"text": "increase", "tokens": ["increase"], "offsets": [208]}}, {"event_type": "CMP", "arguments": [{"text": "data manipulation framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["data", "manipulation", "framework"], "offsets": [119, 120, 121]}, {"text": "improve", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improve"], "offsets": [226]}, {"text": "various automatic evaluation metrics", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["various", "automatic", "evaluation", "metrics"], "offsets": [234, 235, 236, 237]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [226]}}], "document": ["current", "state", "-", "of", "-", "the", "-", "art", "neural", "dialogue", "models", "learn", "from", "human", "conversations", "following", "the", "data", "-", "driven", "paradigm", ".", "as", "such", ",", "a", "reliable", "training", "corpus", "is", "the", "crux", "of", "building", "a", "robust", "and", "well", "-", "behaved", "dialogue", "model", ".", "however", ",", "due", "to", "the", "open", "-", "ended", "nature", "of", "human", "conversations", ",", "the", "quality", "of", "user", "-", "generated", "training", "data", "varies", "greatly", ",", "and", "effective", "training", "samples", "are", "typically", "insufficient", "while", "noisy", "samples", "frequently", "appear", ".", "this", 
"impedes", "the", "learning", "of", "those", "data", "-", "driven", "neural", "dialogue", "models", ".", "therefore", ",", "effective", "dialogue", "learning", "requires", "not", "only", "more", "reliable", "learning", "samples", ",", "but", "also", "fewer", "noisy", "samples", ".", "in", "this", "paper", ",", "we", "propose", "a", "data", "manipulation", "framework", "to", "proactively", "reshape", "the", "data", "distribution", "towards", "reliable", "samples", "by", "augmenting", "and", "highlighting", "effective", "learning", "samples", "as", "well", "as", "reducing", "the", "effect", "of", "inefficient", "samples", "simultaneously", ".", "in", "particular", ",", "the", "data", "manipulation", "model", "selectively", "augments", "the", "training", "samples", "and", "assigns", "an", "importance", "weight", "to", "each", "instance", "to", "reform", "the", "training", "data", ".", "note", "that", ",", "the", "proposed", "data", "manipulation", "framework", "is", "fully", "data", "-", "driven", "and", "learnable", ".", "it", "not", "only", "manipulates", "training", "samples", "to", "optimize", "the", "dialogue", "generation", "model", ",", "but", "also", "learns", "to", "increase", "its", "manipulation", "skills", "through", "gradient", "descent", "with", "validation", "samples", ".", "extensive", "experiments", "show", "that", "our", "framework", "can", "improve", "the", "dialogue", "generation", "performance", "with", "respect", "to", "various", "automatic", "evaluation", "metrics", "and", "human", "judgments", "."]}, {"venue": "ACL", "title": "Is Word Segmentation Child\u2019s Play in All Languages?", "abstract": "When learning language, infants need to break down the flow of input speech into minimal word-like units, a process best described as unsupervised bottom-up segmentation. 
Proposed strategies include several segmentation algorithms, but only cross-linguistically robust algorithms could be plausible candidates for human word learning, since infants have no initial knowledge of the ambient language. We report on the stability in performance of 11 conceptually diverse algorithms on a selection of 8 typologically distinct languages. The results consist evidence that some segmentation algorithms are cross-linguistically valid, thus could be considered as potential strategies employed by all infants.", "doc_id": "0dae3d89031313fd25a341da603697f7", "publication_year": 2019, "sentences": ["when learning language , infants need to break down the flow of input speech into minimal word - like units , a process best described as unsupervised bottom - up segmentation .", "proposed strategies include several segmentation algorithms , but only cross - linguistically robust algorithms could be plausible candidates for human word learning , since infants have no initial knowledge of the ambient language .", "we report on the stability in performance of 11 conceptually diverse algorithms on a selection of 8 typologically distinct languages .", "the results consist evidence that some segmentation algorithms are cross - linguistically valid , thus could be considered as potential strategies employed by all infants ."], "events": [{"event_type": "ITT", "arguments": [{"text": "unsupervised bottom - up segmentation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["unsupervised", "bottom", "-", "up", "segmentation"], "offsets": [26, 27, 28, 29, 30]}], "trigger": {"text": "described", "tokens": ["described"], "offsets": [24]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [66]}, {"text": "on a selection of 8 typologically distinct languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "8", "typologically", "distinct", "languages"], 
"offsets": [78, 82, 83, 84, 85]}, {"text": "stability in performance of 11 conceptually diverse algorithms", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["stability", "in", "performance", "of", "11", "conceptually", "diverse", "algorithms"], "offsets": [70, 71, 72, 73, 74, 75, 76, 77]}], "trigger": {"text": "report", "tokens": ["report"], "offsets": [67]}}, {"event_type": "FAC", "arguments": [{"text": "some segmentation algorithms", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["some", "segmentation", "algorithms"], "offsets": [92, 93, 94]}], "trigger": {"text": "cross - linguistically valid", "tokens": ["cross", "-", "linguistically", "valid"], "offsets": [96, 97, 98, 99]}}, {"event_type": "FIN", "arguments": [{"text": "cross - linguistically valid", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["cross", "-", "linguistically", "valid"], "offsets": [96, 97, 98, 99]}], "trigger": {"text": "consist", "tokens": ["consist"], "offsets": [89]}}], "document": ["when", "learning", "language", ",", "infants", "need", "to", "break", "down", "the", "flow", "of", "input", "speech", "into", "minimal", "word", "-", "like", "units", ",", "a", "process", "best", "described", "as", "unsupervised", "bottom", "-", "up", "segmentation", ".", "proposed", "strategies", "include", "several", "segmentation", "algorithms", ",", "but", "only", "cross", "-", "linguistically", "robust", "algorithms", "could", "be", "plausible", "candidates", "for", "human", "word", "learning", ",", "since", "infants", "have", "no", "initial", "knowledge", "of", "the", "ambient", "language", ".", "we", "report", "on", "the", "stability", "in", "performance", "of", "11", "conceptually", "diverse", "algorithms", "on", "a", "selection", "of", "8", "typologically", "distinct", "languages", ".", "the", "results", "consist", "evidence", "that", "some", "segmentation", "algorithms", "are", "cross", "-", "linguistically", "valid", ",", "thus", "could", "be", 
"considered", "as", "potential", "strategies", "employed", "by", "all", "infants", "."]}, {"venue": "ACL", "title": "On the Word Alignment from Neural Machine Translation", "abstract": "Prior researches suggest that neural machine translation (NMT) captures word alignment through its attention mechanism, however, this paper finds attention may almost fail to capture word alignment for some NMT models. This paper thereby proposes two methods to induce word alignment which are general and agnostic to specific NMT models. Experiments show that both methods induce much better word alignment than attention. This paper further visualizes the translation through the word alignment induced by NMT. In particular, it analyzes the effect of alignment errors on translation errors at word level and its quantitative analysis over many testing examples consistently demonstrate that alignment errors are likely to lead to translation errors measured by different metrics.", "doc_id": "03c5d8a9251a3d5102d3fc476289fd2b", "publication_year": 2019, "sentences": ["prior researches suggest that neural machine translation ( nmt ) captures word alignment through its attention mechanism , however , this paper finds attention may almost fail to capture word alignment for some nmt models .", "this paper thereby proposes two methods to induce word alignment which are general and agnostic to specific nmt models .", "experiments show that both methods induce much better word alignment than attention .", "this paper further visualizes the translation through the word alignment induced by nmt .", "in particular , it analyzes the effect of alignment errors on translation errors at word level and its quantitative analysis over many testing examples consistently demonstrate that alignment errors are likely to lead to translation errors measured by different metrics ."], "events": [{"event_type": "RWS", "arguments": [{"text": "attention mechanism", "nugget_type": "MOD", "argument_type": 
"BaseComponent", "tokens": ["attention", "mechanism"], "offsets": [15, 16]}, {"text": "word alignment", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["word", "alignment"], "offsets": [11, 12]}], "trigger": {"text": "captures", "tokens": ["captures"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "attention", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["attention"], "offsets": [23]}, {"text": "word alignment", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["word", "alignment"], "offsets": [29, 30]}, {"text": "for some nmt models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "some", "nmt", "models"], "offsets": [31, 32, 33, 34]}], "trigger": {"text": "fail to capture", "tokens": ["fail", "to", "capture"], "offsets": [26, 27, 28]}}, {"event_type": "PRP", "arguments": [{"text": "two methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "methods"], "offsets": [40, 41]}, {"text": "induce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["induce"], "offsets": [43]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [39]}}, {"event_type": "PUR", "arguments": [{"text": "word alignment", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["word", "alignment"], "offsets": [44, 45]}], "trigger": {"text": "induce", "tokens": ["induce"], "offsets": [43]}}, {"event_type": "FIN", "arguments": [{"text": "induce", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["induce"], "offsets": [61]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [57]}}, {"event_type": "CMP", "arguments": [{"text": "both methods", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["both", "methods"], "offsets": [59, 60]}, {"text": "attention", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["attention"], "offsets": [67]}, {"text": "much better word alignment", "nugget_type": "STR", "argument_type": "Result", "tokens": 
["much", "better", "word", "alignment"], "offsets": [62, 63, 64, 65]}], "trigger": {"text": "induce", "tokens": ["induce"], "offsets": [61]}}, {"event_type": "FAC", "arguments": [{"text": "effect of alignment errors on translation errors at word level", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["effect", "of", "alignment", "errors", "on", "translation", "errors", "at", "word", "level"], "offsets": [89, 90, 91, 92, 93, 94, 95, 96, 97, 98]}], "trigger": {"text": "analyzes", "tokens": ["analyzes"], "offsets": [87]}}, {"event_type": "FIN", "arguments": [{"text": "lead", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["lead"], "offsets": [115]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [108]}}, {"event_type": "FAC", "arguments": [{"text": "alignment errors", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["alignment", "errors"], "offsets": [110, 111]}, {"text": "translation errors measured by different metrics", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["translation", "errors", "measured", "by", "different", "metrics"], "offsets": [117, 118, 119, 120, 121, 122]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [115]}}], "document": ["prior", "researches", "suggest", "that", "neural", "machine", "translation", "(", "nmt", ")", "captures", "word", "alignment", "through", "its", "attention", "mechanism", ",", "however", ",", "this", "paper", "finds", "attention", "may", "almost", "fail", "to", "capture", "word", "alignment", "for", "some", "nmt", "models", ".", "this", "paper", "thereby", "proposes", "two", "methods", "to", "induce", "word", "alignment", "which", "are", "general", "and", "agnostic", "to", "specific", "nmt", "models", ".", "experiments", "show", "that", "both", "methods", "induce", "much", "better", "word", "alignment", "than", "attention", ".", "this", "paper", "further", "visualizes", "the", "translation", "through", "the", "word", "alignment", 
"induced", "by", "nmt", ".", "in", "particular", ",", "it", "analyzes", "the", "effect", "of", "alignment", "errors", "on", "translation", "errors", "at", "word", "level", "and", "its", "quantitative", "analysis", "over", "many", "testing", "examples", "consistently", "demonstrate", "that", "alignment", "errors", "are", "likely", "to", "lead", "to", "translation", "errors", "measured", "by", "different", "metrics", "."]}, {"venue": "ACL", "title": "Point, Disambiguate and Copy: Incorporating Bilingual Dictionaries for Neural Machine Translation", "abstract": "This paper proposes a sophisticated neural architecture to incorporate bilingual dictionaries into Neural Machine Translation (NMT) models. By introducing three novel components: Pointer, Disambiguator, and Copier, our method PDC achieves the following merits inherently compared with previous efforts: (1) Pointer leverages the semantic information from bilingual dictionaries, for the first time, to better locate source words whose translation in dictionaries can potentially be used; (2) Disambiguator synthesizes contextual information from the source view and the target view, both of which contribute to distinguishing the proper translation of a specific source word from multiple candidates in dictionaries; (3) Copier systematically connects Pointer and Disambiguator based on a hierarchical copy mechanism seamlessly integrated with Transformer, thereby building an end-to-end architecture that could avoid error propagation problems in alternative pipe-line methods. 
The experimental results on Chinese-English and English-Japanese benchmarks demonstrate the PDC\u2019s overall superiority and effectiveness of each component.", "doc_id": "5393a581f1995c01cfe9e63d5706c159", "publication_year": 2021, "sentences": ["this paper proposes a sophisticated neural architecture to incorporate bilingual dictionaries into neural machine translation ( nmt ) models .", "by introducing three novel components : pointer , disambiguator , and copier , our method pdc achieves the following merits inherently compared with previous efforts : ( 1 ) pointer leverages the semantic information from bilingual dictionaries , for the first time , to better locate source words whose translation in dictionaries can potentially be used ; ( 2 ) disambiguator synthesizes contextual information from the source view and the target view , both of which contribute to distinguishing the proper translation of a specific source word from multiple candidates in dictionaries ; ( 3 ) copier systematically connects pointer and disambiguator based on a hierarchical copy mechanism seamlessly integrated with transformer , thereby building an end - to - end architecture that could avoid error propagation problems in alternative pipe - line methods .", "the experimental results on chinese - english and english - japanese benchmarks demonstrate the pdc \u2019 s overall superiority and effectiveness of each component ."], "events": [{"event_type": "PRP", "arguments": [{"text": "sophisticated neural architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["sophisticated", "neural", "architecture"], "offsets": [4, 5, 6]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [2]}}, {"event_type": "MDS", "arguments": [{"text": "bilingual dictionaries", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["bilingual", "dictionaries"], "offsets": [9, 10]}, {"text": "neural machine translation ( nmt ) models", "nugget_type": "APP", 
"argument_type": "TriedComponent", "tokens": ["neural", "machine", "translation", "(", "nmt", ")", "models"], "offsets": [12, 13, 14, 15, 16, 17, 18]}], "trigger": {"text": "incorporate", "tokens": ["incorporate"], "offsets": [8]}}, {"event_type": "PRP", "arguments": [{"text": "three novel components", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["three", "novel", "components"], "offsets": [22, 23, 24]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [21]}}, {"event_type": "CMP", "arguments": [{"text": "pdc", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pdc"], "offsets": [35]}, {"text": "previous efforts", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "efforts"], "offsets": [43, 44]}, {"text": "following merits inherently", "nugget_type": "STR", "argument_type": "Result", "tokens": ["following", "merits", "inherently"], "offsets": [38, 39, 40]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [36]}}, {"event_type": "MDS", "arguments": [{"text": "pointer", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["pointer"], "offsets": [49]}, {"text": "semantic information from bilingual dictionaries", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["semantic", "information", "from", "bilingual", "dictionaries"], "offsets": [52, 53, 54, 55, 56]}, {"text": "better locate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["better", "locate"], "offsets": [64, 65]}], "trigger": {"text": "leverages", "tokens": ["leverages"], "offsets": [50]}}, {"event_type": "PUR", "arguments": [{"text": "source words whose translation in dictionaries can potentially be used", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["source", "words", "whose", "translation", "in", "dictionaries", "can", "potentially", "be", "used"], "offsets": [66, 67, 68, 69, 70, 71, 72, 73, 74, 75]}], "trigger": {"text": "better locate", "tokens": ["better", 
"locate"], "offsets": [64, 65]}}, {"event_type": "MDS", "arguments": [{"text": "disambiguator", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["disambiguator"], "offsets": [80]}, {"text": "contextual information from the source view", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["contextual", "information", "from", "the", "source", "view"], "offsets": [82, 83, 84, 85, 86, 87]}, {"text": "contextual information from the target view", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["contextual", "information", "from", "the", "target", "view"], "offsets": [82, 83, 84, 89, 90, 91]}], "trigger": {"text": "synthesizes", "tokens": ["synthesizes"], "offsets": [81]}}, {"event_type": "MDS", "arguments": [{"text": "copier", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["copier"], "offsets": [116]}, {"text": "pointer", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["pointer"], "offsets": [119]}, {"text": "disambiguator", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["disambiguator"], "offsets": [121]}, {"text": "based on a hierarchical copy mechanism seamlessly integrated with transformer", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "a", "hierarchical", "copy", "mechanism", "seamlessly", "integrated", "with", "transformer"], "offsets": [122, 123, 124, 125, 126, 127, 128, 129, 130, 131]}, {"text": "building", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["building"], "offsets": [134]}, {"text": "could avoid", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["could", "avoid"], "offsets": [143, 144]}], "trigger": {"text": "systematically connects", "tokens": ["systematically", "connects"], "offsets": [117, 118]}}, {"event_type": "PUR", "arguments": [{"text": "end - to - end architecture", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["end", "-", "to", "-", "end", "architecture"], 
"offsets": [136, 137, 138, 139, 140, 141]}], "trigger": {"text": "building", "tokens": ["building"], "offsets": [134]}}, {"event_type": "PUR", "arguments": [{"text": "error propagation problems in alternative pipe - line methods", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["error", "propagation", "problems", "in", "alternative", "pipe", "-", "line", "methods"], "offsets": [145, 146, 147, 148, 149, 150, 151, 152, 153]}], "trigger": {"text": "could avoid", "tokens": ["could", "avoid"], "offsets": [143, 144]}}, {"event_type": "FAC", "arguments": [{"text": "pdc", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pdc"], "offsets": [169]}, {"text": "overall superiority", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["overall", "superiority"], "offsets": [172, 173]}, {"text": "each component", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["each", "component"], "offsets": [177, 178]}, {"text": "effectiveness", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["effectiveness"], "offsets": [175]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [167]}}], "document": ["this", "paper", "proposes", "a", "sophisticated", "neural", "architecture", "to", "incorporate", "bilingual", "dictionaries", "into", "neural", "machine", "translation", "(", "nmt", ")", "models", ".", "by", "introducing", "three", "novel", "components", ":", "pointer", ",", "disambiguator", ",", "and", "copier", ",", "our", "method", "pdc", "achieves", "the", "following", "merits", "inherently", "compared", "with", "previous", "efforts", ":", "(", "1", ")", "pointer", "leverages", "the", "semantic", "information", "from", "bilingual", "dictionaries", ",", "for", "the", "first", "time", ",", "to", "better", "locate", "source", "words", "whose", "translation", "in", "dictionaries", "can", "potentially", "be", "used", ";", "(", "2", ")", "disambiguator", "synthesizes", "contextual", "information", "from", "the", "source", 
"view", "and", "the", "target", "view", ",", "both", "of", "which", "contribute", "to", "distinguishing", "the", "proper", "translation", "of", "a", "specific", "source", "word", "from", "multiple", "candidates", "in", "dictionaries", ";", "(", "3", ")", "copier", "systematically", "connects", "pointer", "and", "disambiguator", "based", "on", "a", "hierarchical", "copy", "mechanism", "seamlessly", "integrated", "with", "transformer", ",", "thereby", "building", "an", "end", "-", "to", "-", "end", "architecture", "that", "could", "avoid", "error", "propagation", "problems", "in", "alternative", "pipe", "-", "line", "methods", ".", "the", "experimental", "results", "on", "chinese", "-", "english", "and", "english", "-", "japanese", "benchmarks", "demonstrate", "the", "pdc", "\u2019", "s", "overall", "superiority", "and", "effectiveness", "of", "each", "component", "."]}, {"venue": "ACL", "title": "Improving the Generalizability of Depression Detection by Leveraging Clinical Questionnaires", "abstract": "Automated methods have been widely used to identify and analyze mental health conditions (e.g., depression) from various sources of information, including social media. Yet, deployment of such models in real-world healthcare applications faces challenges including poor out-of-domain generalization and lack of trust in black box models. In this work, we propose approaches for depression detection that are constrained to different degrees by the presence of symptoms described in PHQ9, a questionnaire used by clinicians in the depression screening process. In dataset-transfer experiments on three social media datasets, we find that grounding the model in PHQ9\u2019s symptoms substantially improves its ability to generalize to out-of-distribution data compared to a standard BERT-based approach. Furthermore, this approach can still perform competitively on in-domain data. 
These results and our qualitative analyses suggest that grounding model predictions in clinically-relevant symptoms can improve generalizability while producing a model that is easier to inspect.", "doc_id": "68d4b389ff7cc1762d839b88be4dcbf1", "publication_year": 2022, "sentences": ["automated methods have been widely used to identify and analyze mental health conditions ( e . g . , depression ) from various sources of information , including social media .", "yet , deployment of such models in real - world healthcare applications faces challenges including poor out - of - domain generalization and lack of trust in black box models .", "in this work , we propose approaches for depression detection that are constrained to different degrees by the presence of symptoms described in phq9 , a questionnaire used by clinicians in the depression screening process .", "in dataset - transfer experiments on three social media datasets , we find that grounding the model in phq9 \u2019 s symptoms substantially improves its ability to generalize to out - of - distribution data compared to a standard bert - based approach .", "furthermore , this approach can still perform competitively on in - domain data .", "these results and our qualitative analyses suggest that grounding model predictions in clinically - relevant symptoms can improve generalizability while producing a model that is easier to inspect ."], "events": [{"event_type": "ITT", "arguments": [{"text": "automated methods", "nugget_type": "APP", "argument_type": "Target", "tokens": ["automated", "methods"], "offsets": [0, 1]}], "trigger": {"text": "identify and analyze", "tokens": ["identify", "and", "analyze"], "offsets": [7, 8, 9]}}, {"event_type": "RWF", "arguments": [{"text": "poor out - of - domain generalization", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["poor", "out", "-", "of", "-", "domain", "generalization"], "offsets": [46, 47, 48, 49, 50, 51, 52]}, {"text": "lack of trust", "nugget_type": 
"WEA", "argument_type": "Fault", "tokens": ["lack", "of", "trust"], "offsets": [54, 55, 56]}, {"text": "in black box models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "black", "box", "models"], "offsets": [57, 58, 59, 60]}], "trigger": {"text": "including", "tokens": ["including"], "offsets": [45]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [66]}, {"text": "approaches", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approaches"], "offsets": [68]}, {"text": "depression detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["depression", "detection"], "offsets": [70, 71]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [67]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [109]}, {"text": "improves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improves"], "offsets": [121]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [110]}}, {"event_type": "CMP", "arguments": [{"text": "in phq9 \u2019 s symptoms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "phq9", "\u2019", "s", "symptoms"], "offsets": [115, 116, 117, 118, 119]}, {"text": "substantially", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["substantially"], "offsets": [120]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [121]}, {"text": "ability to generalize to out - of - distribution data", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["ability", "to", "generalize", "to", "out", "-", "of", "-", "distribution", "data"], "offsets": [123, 124, 125, 126, 127, 128, 129, 130, 131, 132]}, {"text": "standard bert - based approach", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["standard", "bert", "-", "based", "approach"], 
"offsets": [136, 137, 138, 139, 140]}, {"text": "grounding the model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["grounding", "the", "model"], "offsets": [112, 113, 114]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach"], "offsets": [145]}, {"text": "in - domain data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["in", "-", "domain", "data"], "offsets": [151, 152, 153, 154]}, {"text": "competitively", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["competitively"], "offsets": [149]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [148]}}, {"event_type": "FIN", "arguments": [{"text": "improve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improve"], "offsets": [173]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [162]}}, {"event_type": "FAC", "arguments": [{"text": "in clinically - relevant symptoms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "clinically", "-", "relevant", "symptoms"], "offsets": [167, 168, 169, 170, 171]}, {"text": "generalizability", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["generalizability"], "offsets": [174]}, {"text": "grounding model predictions", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["grounding", "model", "predictions"], "offsets": [164, 165, 166]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [173]}}, {"event_type": "FIN", "arguments": [{"text": "producing", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["producing"], "offsets": [176]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [162]}}, {"event_type": "FAC", "arguments": [{"text": "grounding model predictions", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["grounding", "model", 
"predictions"], "offsets": [164, 165, 166]}, {"text": "model", "nugget_type": "APP", "argument_type": "Object", "tokens": ["model"], "offsets": [178]}, {"text": "that is easier to inspect", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["that", "is", "easier", "to", "inspect"], "offsets": [179, 180, 181, 182, 183]}], "trigger": {"text": "producing", "tokens": ["producing"], "offsets": [176]}}], "document": ["automated", "methods", "have", "been", "widely", "used", "to", "identify", "and", "analyze", "mental", "health", "conditions", "(", "e", ".", "g", ".", ",", "depression", ")", "from", "various", "sources", "of", "information", ",", "including", "social", "media", ".", "yet", ",", "deployment", "of", "such", "models", "in", "real", "-", "world", "healthcare", "applications", "faces", "challenges", "including", "poor", "out", "-", "of", "-", "domain", "generalization", "and", "lack", "of", "trust", "in", "black", "box", "models", ".", "in", "this", "work", ",", "we", "propose", "approaches", "for", "depression", "detection", "that", "are", "constrained", "to", "different", "degrees", "by", "the", "presence", "of", "symptoms", "described", "in", "phq9", ",", "a", "questionnaire", "used", "by", "clinicians", "in", "the", "depression", "screening", "process", ".", "in", "dataset", "-", "transfer", "experiments", "on", "three", "social", "media", "datasets", ",", "we", "find", "that", "grounding", "the", "model", "in", "phq9", "\u2019", "s", "symptoms", "substantially", "improves", "its", "ability", "to", "generalize", "to", "out", "-", "of", "-", "distribution", "data", "compared", "to", "a", "standard", "bert", "-", "based", "approach", ".", "furthermore", ",", "this", "approach", "can", "still", "perform", "competitively", "on", "in", "-", "domain", "data", ".", "these", "results", "and", "our", "qualitative", "analyses", "suggest", "that", "grounding", "model", "predictions", "in", "clinically", "-", "relevant", "symptoms", "can", "improve", 
"generalizability", "while", "producing", "a", "model", "that", "is", "easier", "to", "inspect", "."]}, {"venue": "ACL", "title": "Probing as Quantifying Inductive Bias", "abstract": "Pre-trained contextual representations have led to dramatic performance improvements on a range of downstream tasks. Such performance improvements have motivated researchers to quantify and understand the linguistic information encoded in these representations. In general, researchers quantify the amount of linguistic information through probing, an endeavor which consists of training a supervised model to predict a linguistic property directly from the contextual representations. Unfortunately, this definition of probing has been subject to extensive criticism in the literature, and has been observed to lead to paradoxical and counter-intuitive results. In the theoretical portion of this paper, we take the position that the goal of probing ought to be measuring the amount of inductive bias that the representations encode on a specific task. We further describe a Bayesian framework that operationalizes this goal and allows us to quantify the representations\u2019 inductive bias. In the empirical portion of the paper, we apply our framework to a variety of NLP tasks. Our results suggest that our proposed framework alleviates many previous problems found in probing. 
Moreover, we are able to offer concrete evidence that\u2014for some tasks\u2014fastText can offer a better inductive bias than BERT.", "doc_id": "c2e0e07c25c7430ae9d1352730df3a6f", "publication_year": 2022, "sentences": ["pre - trained contextual representations have led to dramatic performance improvements on a range of downstream tasks .", "such performance improvements have motivated researchers to quantify and understand the linguistic information encoded in these representations .", "in general , researchers quantify the amount of linguistic information through probing , an endeavor which consists of training a supervised model to predict a linguistic property directly from the contextual representations .", "unfortunately , this definition of probing has been subject to extensive criticism in the literature , and has been observed to lead to paradoxical and counter - intuitive results .", "in the theoretical portion of this paper , we take the position that the goal of probing ought to be measuring the amount of inductive bias that the representations encode on a specific task .", "we further describe a bayesian framework that operationalizes this goal and allows us to quantify the representations \u2019 inductive bias .", "in the empirical portion of the paper , we apply our framework to a variety of nlp tasks .", "our results suggest that our proposed framework alleviates many previous problems found in probing .", "moreover , we are able to offer concrete evidence that \u2014 for some tasks", "\u2014 fasttext can offer a better inductive bias than bert ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained contextual representations", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["pre", "-", "trained", "contextual", "representations"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "led", "tokens": ["led"], "offsets": [6]}}, {"event_type": "RWS", "arguments": [{"text": "supervised model", "nugget_type": "APP", 
"argument_type": "TriedComponent", "tokens": ["supervised", "model"], "offsets": [56, 57]}, {"text": "contextual representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["contextual", "representations"], "offsets": [66, 67]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [59]}, {"text": "directly", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["directly"], "offsets": [63]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [54]}}, {"event_type": "PUR", "arguments": [{"text": "linguistic property", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["linguistic", "property"], "offsets": [61, 62]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [59]}}, {"event_type": "RWF", "arguments": [{"text": "paradoxical and counter - intuitive results", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["paradoxical", "and", "counter", "-", "intuitive", "results"], "offsets": [92, 93, 94, 95, 96, 97]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [90]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [107]}, {"text": "inductive bias", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["inductive", "bias"], "offsets": [123, 124]}, {"text": "on a specific task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "specific", "task"], "offsets": [129, 130, 131, 132]}], "trigger": {"text": "measuring", "tokens": ["measuring"], "offsets": [119]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [134]}, {"text": "bayesian framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bayesian", "framework"], "offsets": [138, 139]}, {"text": "operationalizes", "nugget_type": "E-PUR", "argument_type": "Target", 
"tokens": ["operationalizes"], "offsets": [141]}], "trigger": {"text": "describe", "tokens": ["describe"], "offsets": [136]}}, {"event_type": "PUR", "arguments": [{"text": "this goal", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["this", "goal"], "offsets": [142, 143]}], "trigger": {"text": "operationalizes", "tokens": ["operationalizes"], "offsets": [141]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [134]}, {"text": "representations \u2019 inductive bias", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["representations", "\u2019", "inductive", "bias"], "offsets": [150, 151, 152, 153]}], "trigger": {"text": "quantify", "tokens": ["quantify"], "offsets": [148]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [163]}, {"text": "bayesian framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bayesian", "framework"], "offsets": [138, 139]}, {"text": "nlp tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "tasks"], "offsets": [171, 172]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [164]}}, {"event_type": "FAC", "arguments": [{"text": "proposed framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["proposed", "framework"], "offsets": [179, 180]}, {"text": "many previous problems", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["many", "previous", "problems"], "offsets": [182, 183, 184]}, {"text": "in probing", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "probing"], "offsets": [186, 187]}], "trigger": {"text": "alleviates", "tokens": ["alleviates"], "offsets": [181]}}, {"event_type": "CMP", "arguments": [{"text": "fasttext", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["fasttext"], "offsets": [204]}, {"text": "bert", "nugget_type": "APP", 
"argument_type": "Arg2", "tokens": ["bert"], "offsets": [212]}, {"text": "better inductive bias", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "inductive", "bias"], "offsets": [208, 209, 210]}], "trigger": {"text": "offer", "tokens": ["offer"], "offsets": [206]}}], "document": ["pre", "-", "trained", "contextual", "representations", "have", "led", "to", "dramatic", "performance", "improvements", "on", "a", "range", "of", "downstream", "tasks", ".", "such", "performance", "improvements", "have", "motivated", "researchers", "to", "quantify", "and", "understand", "the", "linguistic", "information", "encoded", "in", "these", "representations", ".", "in", "general", ",", "researchers", "quantify", "the", "amount", "of", "linguistic", "information", "through", "probing", ",", "an", "endeavor", "which", "consists", "of", "training", "a", "supervised", "model", "to", "predict", "a", "linguistic", "property", "directly", "from", "the", "contextual", "representations", ".", "unfortunately", ",", "this", "definition", "of", "probing", "has", "been", "subject", "to", "extensive", "criticism", "in", "the", "literature", ",", "and", "has", "been", "observed", "to", "lead", "to", "paradoxical", "and", "counter", "-", "intuitive", "results", ".", "in", "the", "theoretical", "portion", "of", "this", "paper", ",", "we", "take", "the", "position", "that", "the", "goal", "of", "probing", "ought", "to", "be", "measuring", "the", "amount", "of", "inductive", "bias", "that", "the", "representations", "encode", "on", "a", "specific", "task", ".", "we", "further", "describe", "a", "bayesian", "framework", "that", "operationalizes", "this", "goal", "and", "allows", "us", "to", "quantify", "the", "representations", "\u2019", "inductive", "bias", ".", "in", "the", "empirical", "portion", "of", "the", "paper", ",", "we", "apply", "our", "framework", "to", "a", "variety", "of", "nlp", "tasks", ".", "our", "results", "suggest", "that", "our", "proposed", "framework", 
"alleviates", "many", "previous", "problems", "found", "in", "probing", ".", "moreover", ",", "we", "are", "able", "to", "offer", "concrete", "evidence", "that", "\u2014", "for", "some", "tasks", "\u2014", "fasttext", "can", "offer", "a", "better", "inductive", "bias", "than", "bert", "."]}, {"venue": "ACL", "title": "Yes, we can! Mining Arguments in 50 Years of US Presidential Campaign Debates", "abstract": "Political debates offer a rare opportunity for citizens to compare the candidates\u2019 positions on the most controversial topics of the campaign. Thus they represent a natural application scenario for Argument Mining. As existing research lacks solid empirical investigation of the typology of argument components in political debates, we fill this gap by proposing an Argument Mining approach to political debates. We address this task in an empirical manner by annotating 39 political debates from the last 50 years of US presidential campaigns, creating a new corpus of 29k argument components, labeled as premises and claims. We then propose two tasks: (1) identifying the argumentative components in such debates, and (2) classifying them as premises and claims. We show that feature-rich SVM learners and Neural Network architectures outperform standard baselines in Argument Mining over such complex data. 
We release the new corpus USElecDeb60To16 and the accompanying software under free licenses to the research community.", "doc_id": "8747ab2481f69f752d963b1a3d2bca5d", "publication_year": 2019, "sentences": ["political debates offer a rare opportunity for citizens to compare the candidates \u2019 positions on the most controversial topics of the campaign .", "thus they represent a natural application scenario for argument mining .", "as existing research lacks solid empirical investigation of the typology of argument components in political debates , we fill this gap by proposing an argument mining approach to political debates .", "we address this task in an empirical manner by annotating 39 political debates from the last 50 years of us presidential campaigns , creating a new corpus of 29k argument components , labeled as premises and claims .", "we then propose two tasks : ( 1 ) identifying the argumentative components in such debates , and ( 2 ) classifying them as premises and claims .", "we show that feature - rich svm learners and neural network architectures outperform standard baselines in argument mining over such complex data .", "we release the new corpus uselecdeb60to16 and the accompanying software under free licenses to the research community ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural application scenario for argument mining", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "application", "scenario", "for", "argument", "mining"], "offsets": [27, 28, 29, 30, 31, 32]}], "trigger": {"text": "represent", "tokens": ["represent"], "offsets": [25]}}, {"event_type": "RWF", "arguments": [{"text": "lacks", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lacks"], "offsets": [37]}, {"text": "existing research", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "research"], "offsets": [35, 36]}], "trigger": {"text": "lacks", "tokens": ["lacks"], "offsets": [37]}}, {"event_type": 
"PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [51]}, {"text": "argument mining approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["argument", "mining", "approach"], "offsets": [58, 59, 60]}, {"text": "political debates", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["political", "debates"], "offsets": [62, 63]}], "trigger": {"text": "proposing", "tokens": ["proposing"], "offsets": [56]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [65]}, {"text": "39 political debates", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["39", "political", "debates"], "offsets": [75, 76, 77]}], "trigger": {"text": "annotating", "tokens": ["annotating"], "offsets": [74]}}, {"event_type": "PRP", "arguments": [{"text": "corpus of 29k argument components", "nugget_type": "DST", "argument_type": "Content", "tokens": ["corpus", "of", "29k", "argument", "components"], "offsets": [91, 92, 93, 94, 95]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [65]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [66]}], "trigger": {"text": "creating", "tokens": ["creating"], "offsets": [88]}}, {"event_type": "PUR", "arguments": [{"text": "task", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["task"], "offsets": [68]}, {"text": "in an empirical manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "empirical", "manner"], "offsets": [69, 70, 71, 72]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [66]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [131]}, {"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], 
"offsets": [143]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [132]}}, {"event_type": "CMP", "arguments": [{"text": "feature - rich svm learners", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["feature", "-", "rich", "svm", "learners"], "offsets": [134, 135, 136, 137, 138]}, {"text": "standard baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["standard", "baselines"], "offsets": [144, 145]}, {"text": "neural network architectures", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["neural", "network", "architectures"], "offsets": [140, 141, 142]}, {"text": "argument mining", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["argument", "mining"], "offsets": [147, 148]}, {"text": "over such complex data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "such", "complex", "data"], "offsets": [149, 150, 151, 152]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [143]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [154]}, {"text": "corpus uselecdeb60to16", "nugget_type": "DST", "argument_type": "Content", "tokens": ["corpus", "uselecdeb60to16"], "offsets": [158, 159]}, {"text": "accompanying software", "nugget_type": "APP", "argument_type": "Content", "tokens": ["accompanying", "software"], "offsets": [162, 163]}, {"text": "under free licenses", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "free", "licenses"], "offsets": [164, 165, 166]}, {"text": "research community", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["research", "community"], "offsets": [169, 170]}], "trigger": {"text": "release", "tokens": ["release"], "offsets": [155]}}, {"event_type": "MDS", "arguments": [{"text": "argumentative components", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["argumentative", "components"], "offsets": [114, 115]}, 
{"text": "premises and claims", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["premises", "claims"], "offsets": [127, 129]}], "trigger": {"text": "classifying", "tokens": ["classifying"], "offsets": [124]}}, {"event_type": "MDS", "arguments": [{"text": "argumentative components", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["argumentative", "components"], "offsets": [114, 115]}, {"text": "debates", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["debates"], "offsets": [118]}], "trigger": {"text": "identifying", "tokens": ["identifying"], "offsets": [112]}}], "document": ["political", "debates", "offer", "a", "rare", "opportunity", "for", "citizens", "to", "compare", "the", "candidates", "\u2019", "positions", "on", "the", "most", "controversial", "topics", "of", "the", "campaign", ".", "thus", "they", "represent", "a", "natural", "application", "scenario", "for", "argument", "mining", ".", "as", "existing", "research", "lacks", "solid", "empirical", "investigation", "of", "the", "typology", "of", "argument", "components", "in", "political", "debates", ",", "we", "fill", "this", "gap", "by", "proposing", "an", "argument", "mining", "approach", "to", "political", "debates", ".", "we", "address", "this", "task", "in", "an", "empirical", "manner", "by", "annotating", "39", "political", "debates", "from", "the", "last", "50", "years", "of", "us", "presidential", "campaigns", ",", "creating", "a", "new", "corpus", "of", "29k", "argument", "components", ",", "labeled", "as", "premises", "and", "claims", ".", "we", "then", "propose", "two", "tasks", ":", "(", "1", ")", "identifying", "the", "argumentative", "components", "in", "such", "debates", ",", "and", "(", "2", ")", "classifying", "them", "as", "premises", "and", "claims", ".", "we", "show", "that", "feature", "-", "rich", "svm", "learners", "and", "neural", "network", "architectures", "outperform", "standard", "baselines", "in", "argument", "mining", 
"over", "such", "complex", "data", ".", "we", "release", "the", "new", "corpus", "uselecdeb60to16", "and", "the", "accompanying", "software", "under", "free", "licenses", "to", "the", "research", "community", "."]}, {"venue": "ACL", "title": "Reinforced Training Data Selection for Domain Adaptation", "abstract": "Supervised models suffer from the problem of domain shifting where distribution mismatch in the data across domains greatly affect model performance. To solve the problem, training data selection (TDS) has been proven to be a prospective solution for domain adaptation in leveraging appropriate data. However, conventional TDS methods normally requires a predefined threshold which is neither easy to set nor can be applied across tasks, and models are trained separately with the TDS process. To make TDS self-adapted to data and task, and to combine it with model training, in this paper, we propose a reinforcement learning (RL) framework that synchronously searches for training instances relevant to the target domain and learns better representations for them. A selection distribution generator (SDG) is designed to perform the selection and is updated according to the rewards computed from the selected data, where a predictor is included in the framework to ensure a task-specific model can be trained on the selected data and provides feedback to rewards. 
Experimental results from part-of-speech tagging, dependency parsing, and sentiment analysis, as well as ablation studies, illustrate that the proposed framework is not only effective in data selection and representation, but also generalized to accommodate different NLP tasks.", "doc_id": "095519f372f820e6781529170ac17d83", "publication_year": 2019, "sentences": ["supervised models suffer from the problem of domain shifting where distribution mismatch in the data across domains greatly affect model performance .", "to solve the problem , training data selection ( tds ) has been proven to be a prospective solution for domain adaptation in leveraging appropriate data .", "however , conventional tds methods normally requires a predefined threshold which is neither easy to set nor can be applied across tasks , and models are trained separately with the tds process .", "to make tds self - adapted to data and task , and to combine it with model training , in this paper , we propose a reinforcement learning ( rl ) framework that synchronously searches for training instances relevant to the target domain and learns better representations for them .", "a selection distribution generator ( sdg ) is designed to perform the selection and is updated according to the rewards computed from the selected data , where a predictor is included in the framework to ensure a task - specific model can be trained on the selected data and provides feedback to rewards .", "experimental results from part - of - speech tagging , dependency parsing , and sentiment analysis , as well as ablation studies , illustrate that the proposed framework is not only effective in data selection and representation , but also generalized to accommodate different nlp tasks ."], "events": [{"event_type": "RWF", "arguments": [{"text": "supervised models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["supervised", "models"], "offsets": [0, 1]}, {"text": "problem of domain shifting", 
"nugget_type": "WEA", "argument_type": "Fault", "tokens": ["problem", "of", "domain", "shifting"], "offsets": [5, 6, 7, 8]}], "trigger": {"text": "suffer", "tokens": ["suffer"], "offsets": [2]}}, {"event_type": "RWF", "arguments": [{"text": "distribution mismatch in the data across domains", "nugget_type": "WEA", "argument_type": "Concern", "tokens": ["distribution", "mismatch", "in", "the", "data", "across", "domains"], "offsets": [10, 11, 12, 13, 14, 15, 16]}, {"text": "model performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["model", "performance"], "offsets": [19, 20]}], "trigger": {"text": "greatly affect", "tokens": ["greatly", "affect"], "offsets": [17, 18]}}, {"event_type": "RWS", "arguments": [{"text": "solve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["solve"], "offsets": [23]}, {"text": "training data selection", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["training", "data", "selection"], "offsets": [27, 28, 29]}, {"text": "appropriate data", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["appropriate", "data"], "offsets": [46, 47]}, {"text": "domain adaptation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["domain", "adaptation"], "offsets": [42, 43]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [45]}}, {"event_type": "PUR", "arguments": [{"text": "problem of domain shifting", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["problem", "of", "domain", "shifting"], "offsets": [5, 6, 7, 8]}], "trigger": {"text": "solve", "tokens": ["solve"], "offsets": [23]}}, {"event_type": "RWF", "arguments": [{"text": "conventional tds methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["conventional", "training", "data", "selection", "methods"], "offsets": [51, 27, 28, 29, 53]}, {"text": "predefined threshold", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["predefined", "threshold"], "offsets": [57, 58]}], 
"trigger": {"text": "normally requires", "tokens": ["normally", "requires"], "offsets": [54, 55]}}, {"event_type": "RWF", "arguments": [{"text": "predefined threshold", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["predefined", "threshold"], "offsets": [57, 58]}, {"text": "neither easy", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["neither", "easy"], "offsets": [61, 62]}], "trigger": {"text": "neither easy", "tokens": ["neither", "easy"], "offsets": [61, 62]}}, {"event_type": "RWF", "arguments": [{"text": "predefined threshold", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["predefined", "threshold"], "offsets": [57, 58]}, {"text": "nor can be applied", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["nor", "can", "be", "applied"], "offsets": [65, 66, 67, 68]}], "trigger": {"text": "nor can be applied", "tokens": ["nor", "can", "be", "applied"], "offsets": [65, 66, 67, 68]}}, {"event_type": "RWF", "arguments": [{"text": "conventional tds methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["conventional", "training", "data", "selection", "methods"], "offsets": [51, 27, 28, 29, 53]}, {"text": "trained separately", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["trained", "separately"], "offsets": [75, 76]}], "trigger": {"text": "trained separately", "tokens": ["trained", "separately"], "offsets": [75, 76]}}, {"event_type": "PRP", "arguments": [{"text": "make tds self - adapted to data and task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["make", "tds", "self", "-", "adapted", "to", "data", "and", "task"], "offsets": [83, 84, 85, 86, 87, 88, 89, 90, 91]}, {"text": "combine it with model training", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["combine", "it", "with", "model", "training"], "offsets": [95, 96, 97, 98, 99]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [105]}, {"text": "reinforcement 
learning ( rl ) framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["reinforcement", "learning", "(", "rl", ")", "framework"], "offsets": [108, 109, 110, 111, 112, 113]}, {"text": "synchronously searches", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["synchronously", "searches"], "offsets": [115, 116]}, {"text": "learns", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learns"], "offsets": [126]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [106]}}, {"event_type": "PUR", "arguments": [{"text": "training instances relevant", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["training", "instances", "relevant"], "offsets": [118, 119, 120]}], "trigger": {"text": "synchronously searches", "tokens": ["synchronously", "searches"], "offsets": [115, 116]}}, {"event_type": "PUR", "arguments": [{"text": "better representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["better", "representations"], "offsets": [127, 128]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [126]}}, {"event_type": "PRP", "arguments": [{"text": "selection distribution generator", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["selection", "distribution", "generator"], "offsets": [133, 134, 135]}, {"text": "perform", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["perform"], "offsets": [142]}], "trigger": {"text": "designed", "tokens": ["designed"], "offsets": [140]}}, {"event_type": "PUR", "arguments": [{"text": "selection", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["selection"], "offsets": [144]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [142]}}, {"event_type": "WKS", "arguments": [{"text": "selection distribution generator", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["selection", "distribution", "generator"], "offsets": [133, 134, 135]}, {"text": "according to the rewards computed from the selected 
data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["according", "to", "the", "rewards", "computed", "from", "the", "selected", "data"], "offsets": [148, 149, 150, 151, 152, 153, 154, 155, 156]}], "trigger": {"text": "updated", "tokens": ["updated"], "offsets": [147]}}, {"event_type": "MDS", "arguments": [{"text": "in the framework", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "reinforcement", "learning", "(", "rl", ")", "framework"], "offsets": [163, 108, 109, 110, 111, 112, 113]}, {"text": "predictor", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["predictor"], "offsets": [160]}, {"text": "ensure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["ensure"], "offsets": [167]}, {"text": "provides", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["provides"], "offsets": [181]}], "trigger": {"text": "included", "tokens": ["included"], "offsets": [162]}}, {"event_type": "PUR", "arguments": [{"text": "task - specific model can be trained on the selected data", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["task", "-", "specific", "model", "can", "be", "trained", "on", "the", "selected", "data"], "offsets": [169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179]}], "trigger": {"text": "ensure", "tokens": ["ensure"], "offsets": [167]}}, {"event_type": "PUR", "arguments": [{"text": "feedback to rewards", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["feedback", "to", "rewards"], "offsets": [182, 183, 184]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [181]}}, {"event_type": "FAC", "arguments": [{"text": "reinforcement learning ( rl ) framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["reinforcement", "learning", "(", "rl", ")", "framework"], "offsets": [108, 109, 110, 111, 112, 113]}, {"text": "in data selection", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "data", "selection"], "offsets": 
[218, 219, 220]}, {"text": "in data representation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "data", "representation"], "offsets": [218, 219, 222]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [217]}}, {"event_type": "FAC", "arguments": [{"text": "reinforcement learning ( rl ) framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["reinforcement", "learning", "(", "rl", ")", "framework"], "offsets": [108, 109, 110, 111, 112, 113]}, {"text": "different nlp tasks", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["different", "nlp", "tasks"], "offsets": [229, 230, 231]}], "trigger": {"text": "generalized to accommodate", "tokens": ["generalized", "to", "accommodate"], "offsets": [226, 227, 228]}}], "document": ["supervised", "models", "suffer", "from", "the", "problem", "of", "domain", "shifting", "where", "distribution", "mismatch", "in", "the", "data", "across", "domains", "greatly", "affect", "model", "performance", ".", "to", "solve", "the", "problem", ",", "training", "data", "selection", "(", "tds", ")", "has", "been", "proven", "to", "be", "a", "prospective", "solution", "for", "domain", "adaptation", "in", "leveraging", "appropriate", "data", ".", "however", ",", "conventional", "tds", "methods", "normally", "requires", "a", "predefined", "threshold", "which", "is", "neither", "easy", "to", "set", "nor", "can", "be", "applied", "across", "tasks", ",", "and", "models", "are", "trained", "separately", "with", "the", "tds", "process", ".", "to", "make", "tds", "self", "-", "adapted", "to", "data", "and", "task", ",", "and", "to", "combine", "it", "with", "model", "training", ",", "in", "this", "paper", ",", "we", "propose", "a", "reinforcement", "learning", "(", "rl", ")", "framework", "that", "synchronously", "searches", "for", "training", "instances", "relevant", "to", "the", "target", "domain", "and", "learns", "better", "representations", "for", "them", ".", "a", "selection", 
"distribution", "generator", "(", "sdg", ")", "is", "designed", "to", "perform", "the", "selection", "and", "is", "updated", "according", "to", "the", "rewards", "computed", "from", "the", "selected", "data", ",", "where", "a", "predictor", "is", "included", "in", "the", "framework", "to", "ensure", "a", "task", "-", "specific", "model", "can", "be", "trained", "on", "the", "selected", "data", "and", "provides", "feedback", "to", "rewards", ".", "experimental", "results", "from", "part", "-", "of", "-", "speech", "tagging", ",", "dependency", "parsing", ",", "and", "sentiment", "analysis", ",", "as", "well", "as", "ablation", "studies", ",", "illustrate", "that", "the", "proposed", "framework", "is", "not", "only", "effective", "in", "data", "selection", "and", "representation", ",", "but", "also", "generalized", "to", "accommodate", "different", "nlp", "tasks", "."]}, {"venue": "ACL", "title": "Text-Free Image-to-Speech Synthesis Using Learned Segmental Units", "abstract": "In this paper we present the first model for directly synthesizing fluent, natural-sounding spoken audio captions for images that does not require natural language text as an intermediate representation or source of supervision. Instead, we connect the image captioning module and the speech synthesis module with a set of discrete, sub-word speech units that are discovered with a self-supervised visual grounding task. We conduct experiments on the Flickr8k spoken caption dataset in addition to a novel corpus of spoken audio captions collected for the popular MSCOCO dataset, demonstrating that our generated captions also capture diverse visual semantics of the images they describe. 
We investigate several different intermediate speech representations, and empirically find that the representation must satisfy several important properties to serve as drop-in replacements for text.", "doc_id": "4b447c290e9f0cfbceb15d9810506f29", "publication_year": 2021, "sentences": ["in this paper we present the first model for directly synthesizing fluent , natural - sounding spoken audio captions for images that does not require natural language text as an intermediate representation or source of supervision .", "instead , we connect the image captioning module and the speech synthesis module with a set of discrete , sub - word speech units that are discovered with a self - supervised visual grounding task .", "we conduct experiments on the flickr8k spoken caption dataset in addition to a novel corpus of spoken audio captions collected for the popular mscoco dataset , demonstrating that our generated captions also capture diverse visual semantics of the images they describe .", "we investigate several different intermediate speech representations , and empirically find that the representation must satisfy several important properties to serve as drop - in replacements for text ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [3]}, {"text": "model for directly synthesizing fluent , natural - sounding spoken audio captions", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model", "for", "directly", "synthesizing", "fluent", ",", "natural", "-", "sounding", "spoken", "audio", "captions"], "offsets": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]}, {"text": "images", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["images"], "offsets": [20]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [4]}}, {"event_type": "MDS", "arguments": [{"text": "set of discrete , sub - word speech units", "nugget_type": "FEA", "argument_type": 
"TriedComponent", "tokens": ["set", "of", "discrete", ",", "sub", "-", "word", "speech", "units"], "offsets": [52, 53, 54, 55, 56, 57, 58, 59, 60]}, {"text": "image captioning module", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["image", "captioning", "module"], "offsets": [42, 43, 44]}, {"text": "speech synthesis module", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["speech", "synthesis", "module"], "offsets": [47, 48, 49]}], "trigger": {"text": "connect", "tokens": ["connect"], "offsets": [40]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [73]}, {"text": "experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["experiments"], "offsets": [75]}, {"text": "flickr8k spoken caption dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["flickr8k", "spoken", "caption", "dataset"], "offsets": [78, 79, 80, 81]}, {"text": "corpus of spoken audio captions collected for the popular mscoco dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["corpus", "of", "spoken", "audio", "captions", "collected", "for", "the", "popular", "mscoco", "dataset"], "offsets": [87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [74]}}, {"event_type": "FIN", "arguments": [{"text": "capture", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["capture"], "offsets": [105]}], "trigger": {"text": "demonstrating", "tokens": ["demonstrating"], "offsets": [99]}}, {"event_type": "FAC", "arguments": [{"text": "diverse visual semantics of the images they describe", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["diverse", "visual", "semantics", "of", "the", "images", "they", "describe"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113]}, {"text": "generated captions", "nugget_type": "TAK", "argument_type": "Subject", "tokens": 
["generated", "captions"], "offsets": [102, 103]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [105]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [115]}, {"text": "several different intermediate speech representations", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["several", "different", "intermediate", "speech", "representations"], "offsets": [117, 118, 119, 120, 121]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [116]}}, {"event_type": "FIN", "arguments": [{"text": "must satisfy", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["must", "satisfy"], "offsets": [129, 130]}], "trigger": {"text": "empirically find", "tokens": ["empirically", "find"], "offsets": [124, 125]}}, {"event_type": "FAC", "arguments": [{"text": "representation", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["representation"], "offsets": [128]}, {"text": "several important properties", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["several", "important", "properties"], "offsets": [131, 132, 133]}, {"text": "serve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["serve"], "offsets": [135]}], "trigger": {"text": "must satisfy", "tokens": ["must", "satisfy"], "offsets": [129, 130]}}, {"event_type": "PUR", "arguments": [{"text": "drop - in replacements for text", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["drop", "-", "in", "replacements", "for", "text"], "offsets": [137, 138, 139, 140, 141, 142]}], "trigger": {"text": "serve", "tokens": ["serve"], "offsets": [135]}}], "document": ["in", "this", "paper", "we", "present", "the", "first", "model", "for", "directly", "synthesizing", "fluent", ",", "natural", "-", "sounding", "spoken", "audio", "captions", "for", "images", "that", "does", "not", "require", "natural", "language", "text", "as", "an", "intermediate", 
"representation", "or", "source", "of", "supervision", ".", "instead", ",", "we", "connect", "the", "image", "captioning", "module", "and", "the", "speech", "synthesis", "module", "with", "a", "set", "of", "discrete", ",", "sub", "-", "word", "speech", "units", "that", "are", "discovered", "with", "a", "self", "-", "supervised", "visual", "grounding", "task", ".", "we", "conduct", "experiments", "on", "the", "flickr8k", "spoken", "caption", "dataset", "in", "addition", "to", "a", "novel", "corpus", "of", "spoken", "audio", "captions", "collected", "for", "the", "popular", "mscoco", "dataset", ",", "demonstrating", "that", "our", "generated", "captions", "also", "capture", "diverse", "visual", "semantics", "of", "the", "images", "they", "describe", ".", "we", "investigate", "several", "different", "intermediate", "speech", "representations", ",", "and", "empirically", "find", "that", "the", "representation", "must", "satisfy", "several", "important", "properties", "to", "serve", "as", "drop", "-", "in", "replacements", "for", "text", "."]}, {"venue": "ACL", "title": "Cree Corpus: A Collection of n\u00eahiyaw\u00eawin Resources", "abstract": "Plains Cree (n\u00eahiyaw\u00eawin) is an Indigenous language that is spoken in Canada and the USA. It is the most widely spoken dialect of Cree and a morphologically complex language that is polysynthetic, highly inflective, and agglutinative. It is an extremely low resource language, with no existing corpus that is both available and prepared for supporting the development of language technologies. To support n\u00eahiyaw\u00eawin revitalization and preservation, we developed a corpus covering diverse genres, time periods, and texts for a variety of intended audiences. The data has been verified and cleaned; it is ready for use in developing language technologies for n\u00eahiyaw\u00eawin. The corpus includes the corresponding English phrases or audio files where available. 
We demonstrate the utility of the corpus through its community use and its use to build language technologies that can provide the types of support that community members have expressed are desirable. The corpus is available for public use.", "doc_id": "53b2c932d1fe04f91f942b4706e9dc0c", "publication_year": 2022, "sentences": ["plains cree ( nehiyawewin ) is an indigenous language that is spoken in canada and the usa .", "it is the most widely spoken dialect of cree and a morphologically complex language that is polysynthetic , highly inflective , and agglutinative .", "it is an extremely low resource language , with no existing corpus that is both available and prepared for supporting the development of language technologies .", "to support nehiyawewin revitalization and preservation , we developed a corpus covering diverse genres , time periods , and texts for a variety of intended audiences .", "the data has been verified and cleaned ; it is ready for use in developing language technologies for nehiyawewin .", "the corpus includes the corresponding english phrases or audio files where available .", "we demonstrate the utility of the corpus through its community use and its use to build language technologies that can provide the types of support that community members have expressed are desirable .", "the corpus is available for public use ."], "events": [{"event_type": "ITT", "arguments": [{"text": "plains cree", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["plains", "cree"], "offsets": [0, 1]}], "trigger": {"text": "indigenous language", "tokens": ["indigenous", "language"], "offsets": [7, 8]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [75]}, {"text": "variety of intended audiences", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["variety", "of", "intended", "audiences"], "offsets": [90, 91, 92, 93]}, {"text": "corpus", "nugget_type": "DST", 
"argument_type": "Content", "tokens": ["corpus"], "offsets": [78]}, {"text": "support", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["support"], "offsets": [69]}], "trigger": {"text": "developed", "tokens": ["developed"], "offsets": [76]}}, {"event_type": "PUR", "arguments": [{"text": "nehiyawewin revitalization", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["nehiyawewin", "revitalization"], "offsets": [70, 71]}, {"text": "nehiyawewin preservation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["nehiyawewin", "preservation"], "offsets": [70, 73]}], "trigger": {"text": "support", "tokens": ["support"], "offsets": [69]}}, {"event_type": "WKS", "arguments": [{"text": "data", "nugget_type": "DST", "argument_type": "Content", "tokens": ["data"], "offsets": [96]}], "trigger": {"text": "verified and cleaned", "tokens": ["verified", "and", "cleaned"], "offsets": [99, 100, 101]}}, {"event_type": "MDS", "arguments": [{"text": "corpus", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["corpus"], "offsets": [116]}, {"text": "corresponding english phrases", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["corresponding", "english", "phrases"], "offsets": [119, 120, 121]}, {"text": "audio files", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["audio", "files"], "offsets": [123, 124]}, {"text": "where available", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["where", "available"], "offsets": [125, 126]}], "trigger": {"text": "includes", "tokens": ["includes"], "offsets": [117]}}, {"event_type": "FAC", "arguments": [{"text": "types of support", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["types", "of", "support"], "offsets": [150, 151, 152]}, {"text": "that community members have expressed are desirable", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["that", "community", "members", "have", "expressed", "are", "desirable"], "offsets": 
[153, 154, 155, 156, 157, 158, 159]}, {"text": "utility of the corpus", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["utility", "of", "the", "corpus"], "offsets": [131, 132, 133, 134]}, {"text": "through its community use and its use to build language technologies", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "corpus", "community", "use", "and", "corpus", "use", "to", "build", "language", "technologies"], "offsets": [135, 134, 137, 138, 139, 134, 141, 142, 143, 144, 145]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [148]}}], "document": ["plains", "cree", "(", "nehiyawewin", ")", "is", "an", "indigenous", "language", "that", "is", "spoken", "in", "canada", "and", "the", "usa", ".", "it", "is", "the", "most", "widely", "spoken", "dialect", "of", "cree", "and", "a", "morphologically", "complex", "language", "that", "is", "polysynthetic", ",", "highly", "inflective", ",", "and", "agglutinative", ".", "it", "is", "an", "extremely", "low", "resource", "language", ",", "with", "no", "existing", "corpus", "that", "is", "both", "available", "and", "prepared", "for", "supporting", "the", "development", "of", "language", "technologies", ".", "to", "support", "nehiyawewin", "revitalization", "and", "preservation", ",", "we", "developed", "a", "corpus", "covering", "diverse", "genres", ",", "time", "periods", ",", "and", "texts", "for", "a", "variety", "of", "intended", "audiences", ".", "the", "data", "has", "been", "verified", "and", "cleaned", ";", "it", "is", "ready", "for", "use", "in", "developing", "language", "technologies", "for", "nehiyawewin", ".", "the", "corpus", "includes", "the", "corresponding", "english", "phrases", "or", "audio", "files", "where", "available", ".", "we", "demonstrate", "the", "utility", "of", "the", "corpus", "through", "its", "community", "use", "and", "its", "use", "to", "build", "language", "technologies", "that", "can", "provide", "the", "types", "of", "support", "that", 
"community", "members", "have", "expressed", "are", "desirable", ".", "the", "corpus", "is", "available", "for", "public", "use", "."]}, {"venue": "ACL", "title": "An Embarrassingly Simple Method to Mitigate Undesirable Properties of Pretrained Language Model Tokenizers", "abstract": "We introduce FLOTA (Few Longest Token Approximation), a simple yet effective method to improve the tokenization of pretrained language models (PLMs). FLOTA uses the vocabulary of a standard tokenizer but tries to preserve the morphological structure of words during tokenization. We evaluate FLOTA on morphological gold segmentations as well as a text classification task, using BERT, GPT-2, and XLNet as example PLMs. FLOTA leads to performance gains, makes inference more efficient, and enhances the robustness of PLMs with respect to whitespace noise.", "doc_id": "a14d4c6baa882cdb60ff84be0edb51fc", "publication_year": 2022, "sentences": ["we introduce flota ( few longest token approximation ) , a simple yet effective method to improve the tokenization of pretrained language models ( plms ) .", "flota uses the vocabulary of a standard tokenizer but tries to preserve the morphological structure of words during tokenization .", "we evaluate flota on morphological gold segmentations as well as a text classification task , using bert , gpt - 2 , and xlnet as example plms .", "flota leads to performance gains , makes inference more efficient , and enhances the robustness of plms with respect to whitespace noise ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "flota", "nugget_type": "APP", "argument_type": "Content", "tokens": ["flota"], "offsets": [2]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [16]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": 
"tokenization of pretrained language models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["tokenization", "of", "pretrained", "language", "models"], "offsets": [18, 19, 20, 21, 22]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [16]}}, {"event_type": "MDS", "arguments": [{"text": "vocabulary of a standard tokenizer", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["vocabulary", "of", "a", "standard", "tokenizer"], "offsets": [30, 31, 32, 33, 34]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [28]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [47]}, {"text": "bert", "nugget_type": "DST", "argument_type": "Content", "tokens": ["bert"], "offsets": [63]}, {"text": "gpt - 2", "nugget_type": "DST", "argument_type": "Content", "tokens": ["gpt", "-", "2"], "offsets": [65, 66, 67]}, {"text": "xlnet", "nugget_type": "DST", "argument_type": "Content", "tokens": ["xlnet"], "offsets": [70]}, {"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluate"], "offsets": [48]}, {"text": "as example plms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "example", "plms"], "offsets": [71, 72, 73]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "flota", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["flota"], "offsets": [49]}, {"text": "on morphological gold segmentations as well as a text classification task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "morphological", "gold", "segmentations", "as", "well", "as", "a", "text", "classification", "task"], "offsets": [50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [48]}}, {"event_type": "FAC", "arguments": [{"text": "flota", "nugget_type": "APP", "argument_type": 
"Subject", "tokens": ["flota"], "offsets": [75]}, {"text": "performance gains", "nugget_type": "STR", "argument_type": "Object", "tokens": ["performance", "gains"], "offsets": [78, 79]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [76]}}, {"event_type": "FAC", "arguments": [{"text": "flota", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["flota"], "offsets": [75]}, {"text": "inference more efficient", "nugget_type": "STR", "argument_type": "Object", "tokens": ["inference", "more", "efficient"], "offsets": [82, 83, 84]}], "trigger": {"text": "makes", "tokens": ["makes"], "offsets": [81]}}, {"event_type": "FAC", "arguments": [{"text": "flota", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["flota"], "offsets": [75]}, {"text": "with respect to whitespace noise", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "respect", "to", "whitespace", "noise"], "offsets": [92, 93, 94, 95, 96]}, {"text": "robustness of plms", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["robustness", "of", "plms"], "offsets": [89, 90, 91]}], "trigger": {"text": "enhances", "tokens": ["enhances"], "offsets": [87]}}, {"event_type": "MDS", "arguments": [{"text": "morphological structure of words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["morphological", "structure", "of", "words"], "offsets": [40, 41, 42, 43]}, {"text": "during tokenization", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "tokenization"], "offsets": [44, 45]}], "trigger": {"text": "preserve", "tokens": ["preserve"], "offsets": [38]}}], "document": ["we", "introduce", "flota", "(", "few", "longest", "token", "approximation", ")", ",", "a", "simple", "yet", "effective", "method", "to", "improve", "the", "tokenization", "of", "pretrained", "language", "models", "(", "plms", ")", ".", "flota", "uses", "the", "vocabulary", "of", "a", "standard", "tokenizer", "but", "tries", "to", "preserve", "the", 
"morphological", "structure", "of", "words", "during", "tokenization", ".", "we", "evaluate", "flota", "on", "morphological", "gold", "segmentations", "as", "well", "as", "a", "text", "classification", "task", ",", "using", "bert", ",", "gpt", "-", "2", ",", "and", "xlnet", "as", "example", "plms", ".", "flota", "leads", "to", "performance", "gains", ",", "makes", "inference", "more", "efficient", ",", "and", "enhances", "the", "robustness", "of", "plms", "with", "respect", "to", "whitespace", "noise", "."]}, {"venue": "ACL", "title": "Relabel the Noise: Joint Extraction of Entities and Relations via Cooperative Multiagents", "abstract": "Distant supervision based methods for entity and relation extraction have received increasing popularity due to the fact that these methods require light human annotation efforts. In this paper, we consider the problem of shifted label distribution, which is caused by the inconsistency between the noisy-labeled training set subject to external knowledge graph and the human-annotated test set, and exacerbated by the pipelined entity-then-relation extraction manner with noise propagation. We propose a joint extraction approach to address this problem by re-labeling noisy instances with a group of cooperative multiagents. To handle noisy instances in a fine-grained manner, each agent in the cooperative group evaluates the instance by calculating a continuous confidence score from its own perspective; To leverage the correlations between these two extraction tasks, a confidence consensus module is designed to gather the wisdom of all agents and re-distribute the noisy training set with confidence-scored labels. Further, the confidences are used to adjust the training losses of extractors. 
Experimental results on two real-world datasets verify the benefits of re-labeling noisy instance, and show that the proposed model significantly outperforms the state-of-the-art entity and relation extraction methods.", "doc_id": "75371be759ae61c9cc0d3e57c64dc235", "publication_year": 2020, "sentences": ["distant supervision based methods for entity and relation extraction have received increasing popularity due to the fact that these methods require light human annotation efforts .", "in this paper , we consider the problem of shifted label distribution , which is caused by the inconsistency between the noisy - labeled training set subject to external knowledge graph and the human - annotated test set , and exacerbated by the pipelined entity - then - relation extraction manner with noise propagation .", "we propose a joint extraction approach to address this problem by re - labeling noisy instances with a group of cooperative multiagents .", "to handle noisy instances in a fine - grained manner , each agent in the cooperative group evaluates the instance by calculating a continuous confidence score from its own perspective ; to leverage the correlations between these two extraction tasks , a confidence consensus module is designed to gather the wisdom of all agents and re - distribute the noisy training set with confidence - scored labels .", "further , the confidences are used to adjust the training losses of extractors .", "experimental results on two real - world datasets verify the benefits of re - labeling noisy instance , and show that the proposed model significantly outperforms the state - of - the - art entity and relation extraction methods ."], "events": [{"event_type": "ITT", "arguments": [{"text": "distant supervision based methods for entity and relation extraction", "nugget_type": "APP", "argument_type": "Target", "tokens": ["distant", "supervision", "based", "methods", "for", "entity", "relation", "extraction"], "offsets": [0, 1, 2, 3, 4, 5, 7, 
8]}], "trigger": {"text": "received", "tokens": ["received"], "offsets": [10]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [30]}, {"text": "problem of shifted label distribution", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem", "of", "shifted", "label", "distribution"], "offsets": [33, 34, 35, 36, 37]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [31]}}, {"event_type": "RWF", "arguments": [{"text": "human - annotated test set", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["human", "-", "annotated", "test", "set"], "offsets": [59, 60, 61, 62, 63]}, {"text": "noisy - labeled training set subject to external knowledge graph", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["noisy", "-", "labeled", "training", "set", "subject", "to", "external", "knowledge", "graph"], "offsets": [47, 48, 49, 50, 51, 52, 53, 54, 55, 56]}], "trigger": {"text": "inconsistency", "tokens": ["inconsistency"], "offsets": [44]}}, {"event_type": "RWF", "arguments": [{"text": "pipelined entity - then - relation extraction manner", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["pipelined", "entity", "-", "then", "-", "relation", "extraction", "manner"], "offsets": [69, 70, 71, 72, 73, 74, 75, 76]}], "trigger": {"text": "noise propagation", "tokens": ["noise", "propagation"], "offsets": [78, 79]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [81]}, {"text": "joint extraction approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["joint", "extraction", "approach"], "offsets": [84, 85, 86]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [88]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [82]}}, {"event_type": "MDS", "arguments": [{"text": 
"noisy instances", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["noisy", "instances"], "offsets": [95, 96]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [88]}, {"text": "group of cooperative multiagents", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["group", "of", "cooperative", "multiagents"], "offsets": [99, 100, 101, 102]}], "trigger": {"text": "re - labeling", "tokens": ["re", "-", "labeling"], "offsets": [92, 93, 94]}}, {"event_type": "PUR", "arguments": [{"text": "problem of shifted label distribution", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["problem", "of", "shifted", "label", "distribution"], "offsets": [33, 34, 35, 36, 37]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [88]}}, {"event_type": "MDS", "arguments": [{"text": "handle", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["handle"], "offsets": [105]}, {"text": "continuous confidence score", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["continuous", "confidence", "score"], "offsets": [127, 128, 129]}, {"text": "instance", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["instance"], "offsets": [123]}], "trigger": {"text": "calculating", "tokens": ["calculating"], "offsets": [125]}}, {"event_type": "PUR", "arguments": [{"text": "noisy instances", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["noisy", "instances"], "offsets": [106, 107]}, {"text": "in a fine - grained manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "fine", "-", "grained", "manner"], "offsets": [108, 109, 110, 111, 112, 113]}], "trigger": {"text": "handle", "tokens": ["handle"], "offsets": [105]}}, {"event_type": "PRP", "arguments": [{"text": "leverage", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["leverage"], "offsets": [136]}, {"text": "confidence consensus module", 
"nugget_type": "MOD", "argument_type": "Content", "tokens": ["confidence", "consensus", "module"], "offsets": [146, 147, 148]}, {"text": "gather the wisdom of all agents", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["gather", "the", "wisdom", "of", "all", "agents"], "offsets": [152, 153, 154, 155, 156, 157]}], "trigger": {"text": "designed", "tokens": ["designed"], "offsets": [150]}}, {"event_type": "PUR", "arguments": [{"text": "correlations between these two extraction tasks", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["correlations", "between", "entity", "relation", "extraction", "tasks"], "offsets": [138, 139, 5, 7, 8, 143]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [136]}}, {"event_type": "PUR", "arguments": [{"text": "wisdom of all agents", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["wisdom", "of", "all", "agents"], "offsets": [154, 155, 156, 157]}], "trigger": {"text": "gather", "tokens": ["gather"], "offsets": [152]}}, {"event_type": "MDS", "arguments": [{"text": "confidence - scored labels", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["confidence", "-", "scored", "labels"], "offsets": [167, 168, 169, 170]}, {"text": "noisy training set", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["noisy", "training", "set"], "offsets": [163, 164, 165]}], "trigger": {"text": "re - distribute", "tokens": ["re", "-", "distribute"], "offsets": [159, 160, 161]}}, {"event_type": "WKS", "arguments": [{"text": "confidences", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["confidences"], "offsets": [175]}, {"text": "adjust", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["adjust"], "offsets": [179]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [177]}}, {"event_type": "PUR", "arguments": [{"text": "training losses of extractors", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["training", "losses", "of", 
"extractors"], "offsets": [181, 182, 183, 184]}], "trigger": {"text": "adjust", "tokens": ["adjust"], "offsets": [179]}}, {"event_type": "FAC", "arguments": [{"text": "two real - world datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "real", "-", "world", "datasets"], "offsets": [189, 190, 191, 192, 193]}, {"text": "benefits of re - labeling noisy instance", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["benefits", "of", "re", "-", "labeling", "noisy", "instance"], "offsets": [196, 197, 198, 199, 200, 201, 202]}], "trigger": {"text": "verify", "tokens": ["verify"], "offsets": [194]}}, {"event_type": "CMP", "arguments": [{"text": "two real - world datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "real", "-", "world", "datasets"], "offsets": [189, 190, 191, 192, 193]}, {"text": "joint extraction approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["joint", "extraction", "approach"], "offsets": [84, 85, 86]}, {"text": "significantly outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "outperforms"], "offsets": [210, 211]}], "trigger": {"text": "significantly outperforms", "tokens": ["significantly", "outperforms"], "offsets": [210, 211]}}], "document": ["distant", "supervision", "based", "methods", "for", "entity", "and", "relation", "extraction", "have", "received", "increasing", "popularity", "due", "to", "the", "fact", "that", "these", "methods", "require", "light", "human", "annotation", "efforts", ".", "in", "this", "paper", ",", "we", "consider", "the", "problem", "of", "shifted", "label", "distribution", ",", "which", "is", "caused", "by", "the", "inconsistency", "between", "the", "noisy", "-", "labeled", "training", "set", "subject", "to", "external", "knowledge", "graph", "and", "the", "human", "-", "annotated", "test", "set", ",", "and", "exacerbated", "by", "the", "pipelined", "entity", "-", "then", "-", "relation", "extraction", 
"manner", "with", "noise", "propagation", ".", "we", "propose", "a", "joint", "extraction", "approach", "to", "address", "this", "problem", "by", "re", "-", "labeling", "noisy", "instances", "with", "a", "group", "of", "cooperative", "multiagents", ".", "to", "handle", "noisy", "instances", "in", "a", "fine", "-", "grained", "manner", ",", "each", "agent", "in", "the", "cooperative", "group", "evaluates", "the", "instance", "by", "calculating", "a", "continuous", "confidence", "score", "from", "its", "own", "perspective", ";", "to", "leverage", "the", "correlations", "between", "these", "two", "extraction", "tasks", ",", "a", "confidence", "consensus", "module", "is", "designed", "to", "gather", "the", "wisdom", "of", "all", "agents", "and", "re", "-", "distribute", "the", "noisy", "training", "set", "with", "confidence", "-", "scored", "labels", ".", "further", ",", "the", "confidences", "are", "used", "to", "adjust", "the", "training", "losses", "of", "extractors", ".", "experimental", "results", "on", "two", "real", "-", "world", "datasets", "verify", "the", "benefits", "of", "re", "-", "labeling", "noisy", "instance", ",", "and", "show", "that", "the", "proposed", "model", "significantly", "outperforms", "the", "state", "-", "of", "-", "the", "-", "art", "entity", "and", "relation", "extraction", "methods", "."]}, {"venue": "ACL", "title": "Prototypical Verbalizer for Prompt-based Few-shot Tuning", "abstract": "Prompt-based tuning for pre-trained language models (PLMs) has shown its effectiveness in few-shot learning. Typically, prompt-based tuning wraps the input text into a cloze question. To make predictions, the model maps the output words to labels via a verbalizer, which is either manually designed or automatically built. 
However, manual verbalizers heavily depend on domain-specific prior knowledge and human efforts, while finding appropriate label words automatically still remains challenging. In this work, we propose the prototypical verbalizer (ProtoVerb) which is built directly from training data. Specifically, ProtoVerb learns prototype vectors as verbalizers by contrastive learning. In this way, the prototypes summarize training instances and are able to enclose rich class-level semantics. We conduct experiments on both topic classification and entity typing tasks, and the results demonstrate that ProtoVerb significantly outperforms current automatic verbalizers, especially when training data is extremely scarce. More surprisingly, ProtoVerb consistently boosts prompt-based tuning even on untuned PLMs, indicating an elegant non-tuning way to utilize PLMs. Our codes are avaliable at https://github.com/thunlp/OpenPrompt.", "doc_id": "c5f75fadf783e54e471bb3d7adc9be50", "publication_year": 2022, "sentences": ["prompt - based tuning for pre - trained language models ( plms ) has shown its effectiveness in few - shot learning .", "typically , prompt - based tuning wraps the input text into a cloze question .", "to make predictions , the model maps the output words to labels via a verbalizer , which is either manually designed or automatically built .", "however , manual verbalizers heavily depend on domain - specific prior knowledge and human efforts , while finding appropriate label words automatically still remains challenging .", "in this work , we propose the prototypical verbalizer ( protoverb ) which is built directly from training data .", "specifically , protoverb learns prototype vectors as verbalizers by contrastive learning .", "in this way , the prototypes summarize training instances and are able to enclose rich class - level semantics .", "we conduct experiments on both topic classification and entity typing tasks , and the results demonstrate that protoverb 
significantly outperforms current automatic verbalizers , especially when training data is extremely scarce .", "more surprisingly , protoverb consistently boosts prompt - based tuning even on untuned plms , indicating an elegant non - tuning way to utilize plms .", "our codes are avaliable at https : / / github . com / thunlp / openprompt ."], "events": [{"event_type": "ITT", "arguments": [{"text": "prompt - based tuning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["prompt", "-", "based", "tuning"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [14]}}, {"event_type": "RWS", "arguments": [{"text": "input text", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["input", "text"], "offsets": [31, 32]}, {"text": "cloze question", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["cloze", "question"], "offsets": [35, 36]}], "trigger": {"text": "wraps", "tokens": ["wraps"], "offsets": [29]}}, {"event_type": "RWS", "arguments": [{"text": "output words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["output", "words"], "offsets": [46, 47]}, {"text": "labels", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["labels"], "offsets": [49]}, {"text": "verbalizer", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["verbalizer"], "offsets": [52]}], "trigger": {"text": "maps", "tokens": ["maps"], "offsets": [44]}}, {"event_type": "RWF", "arguments": [{"text": "manual verbalizers", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["manual", "verbalizers"], "offsets": [65, 66]}, {"text": "heavily depend", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["heavily", "depend"], "offsets": [67, 68]}, {"text": "domain - specific prior knowledge", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["domain", "-", "specific", "prior", "knowledge"], "offsets": [70, 71, 72, 73, 74]}], "trigger": {"text": "heavily depend", 
"tokens": ["heavily", "depend"], "offsets": [67, 68]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [93]}, {"text": "prototypical verbalizer", "nugget_type": "APP", "argument_type": "Content", "tokens": ["prototypical", "verbalizer"], "offsets": [96, 97]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [94]}}, {"event_type": "MDS", "arguments": [{"text": "prototypical verbalizer", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["prototypical", "verbalizer"], "offsets": [96, 97]}, {"text": "training data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["training", "data"], "offsets": [106, 107]}], "trigger": {"text": "built", "tokens": ["built"], "offsets": [103]}}, {"event_type": "MDS", "arguments": [{"text": "prototypes", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["prototypes"], "offsets": [126]}, {"text": "training instances", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["training", "instances"], "offsets": [128, 129]}, {"text": "enclose", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enclose"], "offsets": [134]}], "trigger": {"text": "summarize", "tokens": ["summarize"], "offsets": [127]}}, {"event_type": "PUR", "arguments": [{"text": "rich class - level semantics", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["rich", "class", "-", "level", "semantics"], "offsets": [135, 136, 137, 138, 139]}], "trigger": {"text": "enclose", "tokens": ["enclose"], "offsets": [134]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [160]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [156]}}, {"event_type": "CMP", "arguments": [{"text": "prototypical verbalizer", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["prototypical", 
"verbalizer"], "offsets": [96, 97]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [159]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [160]}, {"text": "current automatic verbalizers", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["current", "automatic", "verbalizers"], "offsets": [161, 162, 163]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [160]}}, {"event_type": "FAC", "arguments": [{"text": "consistently", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["consistently"], "offsets": [177]}, {"text": "prompt - based tuning", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["prompt", "-", "based", "tuning"], "offsets": [179, 180, 181, 182]}, {"text": "on untuned plms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "untuned", "pre", "-", "trained", "language", "models"], "offsets": [184, 185, 5, 6, 7, 8, 9]}, {"text": "prototypical verbalizer", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["prototypical", "verbalizer"], "offsets": [96, 97]}], "trigger": {"text": "boosts", "tokens": ["boosts"], "offsets": [178]}}, {"event_type": "FAC", "arguments": [{"text": "elegant non - tuning way", "nugget_type": "STR", "argument_type": "Object", "tokens": ["elegant", "non", "-", "tuning", "way"], "offsets": [190, 191, 192, 193, 194]}, {"text": "utilize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["utilize"], "offsets": [196]}], "trigger": {"text": "indicating", "tokens": ["indicating"], "offsets": [188]}}, {"event_type": "PUR", "arguments": [{"text": "plms", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["pre", "-", "trained", "language", "models"], "offsets": [5, 6, 7, 8, 9]}], "trigger": {"text": "utilize", "tokens": ["utilize"], "offsets": [196]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": 
"OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [141]}, {"text": "experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["experiments"], "offsets": [143]}, {"text": "topic classification tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["topic", "classification", "tasks"], "offsets": [146, 147, 151]}, {"text": "entity typing tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["entity", "typing", "tasks"], "offsets": [149, 150, 151]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [142]}}, {"event_type": "MDS", "arguments": [{"text": "prototype vectors", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["prototype", "vectors"], "offsets": [113, 114]}, {"text": "verbalizers", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["verbalizers"], "offsets": [116]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [112]}}], "document": ["prompt", "-", "based", "tuning", "for", "pre", "-", "trained", "language", "models", "(", "plms", ")", "has", "shown", "its", "effectiveness", "in", "few", "-", "shot", "learning", ".", "typically", ",", "prompt", "-", "based", "tuning", "wraps", "the", "input", "text", "into", "a", "cloze", "question", ".", "to", "make", "predictions", ",", "the", "model", "maps", "the", "output", "words", "to", "labels", "via", "a", "verbalizer", ",", "which", "is", "either", "manually", "designed", "or", "automatically", "built", ".", "however", ",", "manual", "verbalizers", "heavily", "depend", "on", "domain", "-", "specific", "prior", "knowledge", "and", "human", "efforts", ",", "while", "finding", "appropriate", "label", "words", "automatically", "still", "remains", "challenging", ".", "in", "this", "work", ",", "we", "propose", "the", "prototypical", "verbalizer", "(", "protoverb", ")", "which", "is", "built", "directly", "from", "training", "data", ".", "specifically", ",", "protoverb", "learns", "prototype", 
"vectors", "as", "verbalizers", "by", "contrastive", "learning", ".", "in", "this", "way", ",", "the", "prototypes", "summarize", "training", "instances", "and", "are", "able", "to", "enclose", "rich", "class", "-", "level", "semantics", ".", "we", "conduct", "experiments", "on", "both", "topic", "classification", "and", "entity", "typing", "tasks", ",", "and", "the", "results", "demonstrate", "that", "protoverb", "significantly", "outperforms", "current", "automatic", "verbalizers", ",", "especially", "when", "training", "data", "is", "extremely", "scarce", ".", "more", "surprisingly", ",", "protoverb", "consistently", "boosts", "prompt", "-", "based", "tuning", "even", "on", "untuned", "plms", ",", "indicating", "an", "elegant", "non", "-", "tuning", "way", "to", "utilize", "plms", ".", "our", "codes", "are", "avaliable", "at", "https", ":", "/", "/", "github", ".", "com", "/", "thunlp", "/", "openprompt", "."]}, {"venue": "ACL", "title": "Model-Agnostic Meta-Learning for Relation Classification with Limited Supervision", "abstract": "In this paper we frame the task of supervised relation classification as an instance of meta-learning. We propose a model-agnostic meta-learning protocol for training relation classifiers to achieve enhanced predictive performance in limited supervision settings. During training, we aim to not only learn good parameters for classifying relations with sufficient supervision, but also learn model parameters that can be fine-tuned to enhance predictive performance for relations with limited supervision. 
In experiments conducted on two relation classification datasets, we demonstrate that the proposed meta-learning approach improves the predictive performance of two state-of-the-art supervised relation classification models.", "doc_id": "50292efdaf2b4a536f950cca444faa67", "publication_year": 2019, "sentences": ["in this paper we frame the task of supervised relation classification as an instance of meta - learning .", "we propose a model - agnostic meta - learning protocol for training relation classifiers to achieve enhanced predictive performance in limited supervision settings .", "during training , we aim to not only learn good parameters for classifying relations with sufficient supervision , but also learn model parameters that can be fine - tuned to enhance predictive performance for relations with limited supervision .", "in experiments conducted on two relation classification datasets , we demonstrate that the proposed meta - learning approach improves the predictive performance of two state - of - the - art supervised relation classification models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "supervised relation classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["supervised", "relation", "classification"], "offsets": [8, 9, 10]}], "trigger": {"text": "frame", "tokens": ["frame"], "offsets": [4]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [19]}, {"text": "model - agnostic meta - learning protocol", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model", "-", "agnostic", "meta", "-", "learning", "protocol"], "offsets": [22, 23, 24, 25, 26, 27, 28]}, {"text": "achieve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["achieve"], "offsets": [34]}, {"text": "training", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["training"], "offsets": [30]}], "trigger": {"text": "propose", "tokens": 
["propose"], "offsets": [20]}}, {"event_type": "PUR", "arguments": [{"text": "in limited supervision settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "limited", "supervision", "settings"], "offsets": [38, 39, 40, 41]}, {"text": "enhanced predictive performance", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["enhanced", "predictive", "performance"], "offsets": [35, 36, 37]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [34]}}, {"event_type": "MDS", "arguments": [{"text": "during training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "training"], "offsets": [43, 44]}, {"text": "good parameters", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["good", "parameters"], "offsets": [52, 53]}, {"text": "classifying", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["classifying"], "offsets": [55]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [51]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [91]}, {"text": "improves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improves"], "offsets": [100]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [92]}}, {"event_type": "PUR", "arguments": [{"text": "relation classifiers", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["relation", "classifiers"], "offsets": [31, 32]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [30]}}, {"event_type": "PUR", "arguments": [{"text": "relations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["relations"], "offsets": [56]}, {"text": "with sufficient supervision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "sufficient", "supervision"], "offsets": [57, 58, 59]}], "trigger": {"text": "classifying", "tokens": ["classifying"], "offsets": [55]}}, {"event_type": "MDS", 
"arguments": [{"text": "model parameters", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["model", "parameters"], "offsets": [64, 65]}, {"text": "enhance", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhance"], "offsets": [73]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [63]}}, {"event_type": "PUR", "arguments": [{"text": "predictive performance for relations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["predictive", "performance", "for", "relations"], "offsets": [74, 75, 76, 77]}, {"text": "with limited supervision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "limited", "supervision"], "offsets": [78, 79, 80]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [73]}}, {"event_type": "FAC", "arguments": [{"text": "two relation classification datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "relation", "classification", "datasets"], "offsets": [86, 87, 88, 89]}, {"text": "meta - learning approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["meta", "-", "learning", "approach"], "offsets": [96, 97, 98, 99]}, {"text": "predictive performance of two state - of - the - art supervised relation classification models", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["predictive", "performance", "of", "two", "state", "-", "of", "-", "the", "-", "art", "supervised", "relation", "classification", "models"], "offsets": [102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [100]}}], "document": ["in", "this", "paper", "we", "frame", "the", "task", "of", "supervised", "relation", "classification", "as", "an", "instance", "of", "meta", "-", "learning", ".", "we", "propose", "a", "model", "-", "agnostic", "meta", "-", "learning", "protocol", "for", "training", "relation", "classifiers", "to", "achieve", "enhanced", 
"predictive", "performance", "in", "limited", "supervision", "settings", ".", "during", "training", ",", "we", "aim", "to", "not", "only", "learn", "good", "parameters", "for", "classifying", "relations", "with", "sufficient", "supervision", ",", "but", "also", "learn", "model", "parameters", "that", "can", "be", "fine", "-", "tuned", "to", "enhance", "predictive", "performance", "for", "relations", "with", "limited", "supervision", ".", "in", "experiments", "conducted", "on", "two", "relation", "classification", "datasets", ",", "we", "demonstrate", "that", "the", "proposed", "meta", "-", "learning", "approach", "improves", "the", "predictive", "performance", "of", "two", "state", "-", "of", "-", "the", "-", "art", "supervised", "relation", "classification", "models", "."]}, {"venue": "ACL", "title": "Writing by Memorizing: Hierarchical Retrieval-based Medical Report Generation", "abstract": "Medical report generation is one of the most challenging tasks in medical image analysis. Although existing approaches have achieved promising results, they either require a predefined template database in order to retrieve sentences or ignore the hierarchical nature of medical report generation. To address these issues, we propose MedWriter that incorporates a novel hierarchical retrieval mechanism to automatically extract both report and sentence-level templates for clinically accurate report generation. MedWriter first employs the Visual-Language Retrieval (VLR) module to retrieve the most relevant reports for the given images. To guarantee the logical coherence between generated sentences, the Language-Language Retrieval (LLR) module is introduced to retrieve relevant sentences based on the previous generated description. At last, a language decoder fuses image features and features from retrieved reports and sentences to generate meaningful medical reports. 
We verified the effectiveness of our model by automatic evaluation and human evaluation on two datasets, i.e., Open-I and MIMIC-CXR.", "doc_id": "971cfb7f824f507fbdedfda0c81a3d98", "publication_year": 2021, "sentences": ["medical report generation is one of the most challenging tasks in medical image analysis .", "although existing approaches have achieved promising results , they either require a predefined template database in order to retrieve sentences or ignore the hierarchical nature of medical report generation .", "to address these issues , we propose medwriter that incorporates a novel hierarchical retrieval mechanism to automatically extract both report and sentence - level templates for clinically accurate report generation .", "medwriter first employs the visual - language retrieval ( vlr ) module to retrieve the most relevant reports for the given images .", "to guarantee the logical coherence between generated sentences , the language - language retrieval ( llr ) module is introduced to retrieve relevant sentences based on the previous generated description .", "at last , a language decoder fuses image features and features from retrieved reports and sentences to generate meaningful medical reports .", "we verified the effectiveness of our model by automatic evaluation and human evaluation on two datasets , i . e . 
, open - i and mimic - cxr ."], "events": [{"event_type": "ITT", "arguments": [{"text": "medical image analysis", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["medical", "image", "analysis"], "offsets": [11, 12, 13]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "ignore", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ignore"], "offsets": [36]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [36]}}, {"event_type": "PRP", "arguments": [{"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [46]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [50]}, {"text": "medwriter", "nugget_type": "APP", "argument_type": "Content", "tokens": ["medwriter"], "offsets": [52]}, {"text": "clinically accurate report generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["clinically", "accurate", "report", "generation"], "offsets": [71, 72, 73, 74]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [51]}}, {"event_type": "WKS", "arguments": [{"text": "guarantee", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["guarantee"], "offsets": [100]}, {"text": "language - language retrieval", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["language", "-", "language", "retrieval"], "offsets": [109, 110, 111, 112]}], "trigger": {"text": "introduced", "tokens": ["introduced"], "offsets": [118]}}, {"event_type": "PUR", "arguments": [{"text": "logical coherence between generated sentences", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["logical", "coherence", "between", "generated", "sentences"], "offsets": [102, 103, 104, 105, 106]}], "trigger": {"text": "guarantee", "tokens": ["guarantee"], "offsets": [100]}}, {"event_type": "FAC", "arguments": [{"text": "medwriter", "nugget_type": "APP", "argument_type": 
"Subject", "tokens": ["medwriter"], "offsets": [52]}, {"text": "effectiveness", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["effectiveness"], "offsets": [155]}, {"text": "automatic evaluation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["automatic", "evaluation"], "offsets": [160, 161]}, {"text": "human evaluation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["human", "evaluation"], "offsets": [163, 164]}, {"text": "on two datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["on", "two", "datasets"], "offsets": [165, 166, 167]}], "trigger": {"text": "verified", "tokens": ["verified"], "offsets": [153]}}, {"event_type": "PUR", "arguments": [{"text": "predefined template database", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["predefined", "template", "database"], "offsets": [27, 28, 29]}, {"text": "hierarchical nature of medical report generation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["hierarchical", "nature", "of", "medical", "report", "generation"], "offsets": [38, 39, 40, 41, 42, 43]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [46]}}], "document": ["medical", "report", "generation", "is", "one", "of", "the", "most", "challenging", "tasks", "in", "medical", "image", "analysis", ".", "although", "existing", "approaches", "have", "achieved", "promising", "results", ",", "they", "either", "require", "a", "predefined", "template", "database", "in", "order", "to", "retrieve", "sentences", "or", "ignore", "the", "hierarchical", "nature", "of", "medical", "report", "generation", ".", "to", "address", "these", "issues", ",", "we", "propose", "medwriter", "that", "incorporates", "a", "novel", "hierarchical", "retrieval", "mechanism", "to", "automatically", "extract", "both", "report", "and", "sentence", "-", "level", "templates", "for", "clinically", "accurate", "report", "generation", ".", "medwriter", "first", "employs", "the", "visual", "-", 
"language", "retrieval", "(", "vlr", ")", "module", "to", "retrieve", "the", "most", "relevant", "reports", "for", "the", "given", "images", ".", "to", "guarantee", "the", "logical", "coherence", "between", "generated", "sentences", ",", "the", "language", "-", "language", "retrieval", "(", "llr", ")", "module", "is", "introduced", "to", "retrieve", "relevant", "sentences", "based", "on", "the", "previous", "generated", "description", ".", "at", "last", ",", "a", "language", "decoder", "fuses", "image", "features", "and", "features", "from", "retrieved", "reports", "and", "sentences", "to", "generate", "meaningful", "medical", "reports", ".", "we", "verified", "the", "effectiveness", "of", "our", "model", "by", "automatic", "evaluation", "and", "human", "evaluation", "on", "two", "datasets", ",", "i", ".", "e", ".", ",", "open", "-", "i", "and", "mimic", "-", "cxr", "."]}, {"venue": "ACL", "title": "SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing", "abstract": "Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models, we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific (speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on the output of the decoder. Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation, hoping to improve the modeling capability for both speech and text. 
To align the textual and speech information into this unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text states with latent units as the interface between encoder and decoder. Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and speaker identification.", "doc_id": "ed4affaec155afc95f380e26d2a633c7", "publication_year": 2022, "sentences": ["motivated by the success of t5 ( text - to - text transfer transformer ) in pre - trained natural language processing models , we propose a unified - modal speecht5 framework that explores the encoder - decoder pre - training for self - supervised speech / text representation learning .", "the speecht5 framework consists of a shared encoder - decoder network and six modal - specific ( speech / text ) pre / post - nets .", "after preprocessing the input speech / text through the pre - nets , the shared encoder - decoder network models the sequence - to - sequence transformation , and then the post - nets generate the output in the speech / text modality based on the output of the decoder .", "leveraging large - scale unlabeled speech and text data , we pre - train speecht5 to learn a unified - modal representation , hoping to improve the modeling capability for both speech and text .", "to align the textual and speech information into this unified semantic space , we propose a cross - modal vector quantization approach that randomly mixes up speech / text states with latent units as the interface between encoder and decoder .", "extensive evaluations show the superiority of the proposed speecht5 framework on a wide variety of spoken language processing tasks , including automatic speech recognition , speech synthesis , speech translation , voice conversion , speech 
enhancement , and speaker identification ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [24]}, {"text": "unified - modal speecht5 framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unified", "-", "modal", "speecht5", "framework"], "offsets": [27, 28, 29, 30, 31]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [25]}}, {"event_type": "MDS", "arguments": [{"text": "pre - nets", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["pre", "-", "nets"], "offsets": [87, 88, 89]}, {"text": "input speech / text", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["input", "speech", "/", "text"], "offsets": [81, 82, 83, 84]}], "trigger": {"text": "preprocessing", "tokens": ["preprocessing"], "offsets": [79]}}, {"event_type": "MDS", "arguments": [{"text": "shared encoder - decoder network", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["shared", "encoder", "-", "decoder", "network"], "offsets": [92, 93, 94, 95, 96]}, {"text": "sequence - to - sequence transformation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["sequence", "-", "to", "-", "sequence", "transformation"], "offsets": [99, 100, 101, 102, 103, 104]}], "trigger": {"text": "models", "tokens": ["models"], "offsets": [97]}}, {"event_type": "MDS", "arguments": [{"text": "post - nets", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["post", "-", "nets"], "offsets": [109, 110, 111]}, {"text": "output in the speech / text modality", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["output", "in", "the", "speech", "/", "text", "modality"], "offsets": [114, 115, 116, 117, 118, 119, 120]}, {"text": "output of the decoder", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["output", "of", "the", "decoder"], "offsets": [124, 125, 126, 127]}], "trigger": 
{"text": "generate", "tokens": ["generate"], "offsets": [112]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [177]}, {"text": "cross - modal vector quantization approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["cross", "-", "modal", "vector", "quantization", "approach"], "offsets": [180, 181, 182, 183, 184, 185]}, {"text": "align", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["align"], "offsets": [165]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [178]}}, {"event_type": "PUR", "arguments": [{"text": "unified semantic space", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["unified", "semantic", "space"], "offsets": [173, 174, 175]}, {"text": "textual information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["textual", "information"], "offsets": [167, 170]}, {"text": "speech information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["speech", "information"], "offsets": [169, 170]}], "trigger": {"text": "align", "tokens": ["align"], "offsets": [165]}}, {"event_type": "MDS", "arguments": [{"text": "latent units", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["latent", "units"], "offsets": [195, 196]}, {"text": "interface between encoder and decoder", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["interface", "between", "encoder", "and", "decoder"], "offsets": [199, 200, 201, 202, 203]}, {"text": "speech states", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["speech", "states"], "offsets": [190, 193]}, {"text": "text states", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["text", "states"], "offsets": [192, 193]}], "trigger": {"text": "randomly mixes up", "tokens": ["randomly", "mixes", "up"], "offsets": [187, 188, 189]}}, {"event_type": "FAC", "arguments": [{"text": "on a wide variety of spoken language 
processing tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "wide", "variety", "of", "spoken", "language", "processing", "tasks"], "offsets": [215, 216, 217, 218, 219, 220, 221, 222, 223]}, {"text": "superiority of the proposed speecht5 framework", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["superiority", "of", "the", "proposed", "speecht5", "framework"], "offsets": [209, 210, 211, 212, 213, 214]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [207]}}, {"event_type": "ITT", "arguments": [{"text": "pre - trained natural language processing models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pre", "-", "trained", "natural", "language", "processing", "models"], "offsets": [16, 17, 18, 19, 20, 21, 22]}], "trigger": {"text": "motivated", "tokens": ["motivated"], "offsets": [0]}}, {"event_type": "WKS", "arguments": [{"text": "encoder - decoder pre - training", "nugget_type": "APP", "argument_type": "Content", "tokens": ["encoder", "-", "decoder", "pre", "-", "training"], "offsets": [35, 36, 37, 38, 39, 40]}, {"text": "self - supervised speech / text representation learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["self", "-", "supervised", "speech", "/", "text", "representation", "learning"], "offsets": [42, 43, 44, 45, 46, 47, 48, 49]}], "trigger": {"text": "explores", "tokens": ["explores"], "offsets": [33]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [139]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [154]}, {"text": "leveraging large - scale unlabeled speech and text data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["leveraging", "large", "-", "scale", "unlabeled", "speech", "and", "text", "data"], "offsets": [129, 130, 131, 132, 133, 134, 135, 136, 137]}, {"text": "speecht5", "nugget_type": 
"APP", "argument_type": "Content", "tokens": ["speecht5"], "offsets": [143]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [145]}], "trigger": {"text": "pre - train", "tokens": ["pre", "-", "train"], "offsets": [140, 141, 142]}}, {"event_type": "PUR", "arguments": [{"text": "modeling capability for both speech and text", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["modeling", "capability", "for", "both", "speech", "and", "text"], "offsets": [156, 157, 158, 159, 160, 161, 162]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [154]}}, {"event_type": "PUR", "arguments": [{"text": "unified - modal representation", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["unified", "-", "modal", "representation"], "offsets": [147, 148, 149, 150]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [145]}}], "document": ["motivated", "by", "the", "success", "of", "t5", "(", "text", "-", "to", "-", "text", "transfer", "transformer", ")", "in", "pre", "-", "trained", "natural", "language", "processing", "models", ",", "we", "propose", "a", "unified", "-", "modal", "speecht5", "framework", "that", "explores", "the", "encoder", "-", "decoder", "pre", "-", "training", "for", "self", "-", "supervised", "speech", "/", "text", "representation", "learning", ".", "the", "speecht5", "framework", "consists", "of", "a", "shared", "encoder", "-", "decoder", "network", "and", "six", "modal", "-", "specific", "(", "speech", "/", "text", ")", "pre", "/", "post", "-", "nets", ".", "after", "preprocessing", "the", "input", "speech", "/", "text", "through", "the", "pre", "-", "nets", ",", "the", "shared", "encoder", "-", "decoder", "network", "models", "the", "sequence", "-", "to", "-", "sequence", "transformation", ",", "and", "then", "the", "post", "-", "nets", "generate", "the", "output", "in", "the", "speech", "/", "text", "modality", "based", "on", "the", "output", "of", "the", "decoder", 
".", "leveraging", "large", "-", "scale", "unlabeled", "speech", "and", "text", "data", ",", "we", "pre", "-", "train", "speecht5", "to", "learn", "a", "unified", "-", "modal", "representation", ",", "hoping", "to", "improve", "the", "modeling", "capability", "for", "both", "speech", "and", "text", ".", "to", "align", "the", "textual", "and", "speech", "information", "into", "this", "unified", "semantic", "space", ",", "we", "propose", "a", "cross", "-", "modal", "vector", "quantization", "approach", "that", "randomly", "mixes", "up", "speech", "/", "text", "states", "with", "latent", "units", "as", "the", "interface", "between", "encoder", "and", "decoder", ".", "extensive", "evaluations", "show", "the", "superiority", "of", "the", "proposed", "speecht5", "framework", "on", "a", "wide", "variety", "of", "spoken", "language", "processing", "tasks", ",", "including", "automatic", "speech", "recognition", ",", "speech", "synthesis", ",", "speech", "translation", ",", "voice", "conversion", ",", "speech", "enhancement", ",", "and", "speaker", "identification", "."]}, {"venue": "ACL", "title": "Can Generative Pre-trained Language Models Serve As Knowledge Bases for Closed-book QA?", "abstract": "Recent work has investigated the interesting question using pre-trained language models (PLMs) as knowledge bases for answering open questions. However, existing work is limited in using small benchmarks with high test-train overlaps. We construct a new dataset of closed-book QA using SQuAD, and investigate the performance of BART. Experiments show that it is challenging for BART to remember training facts in high precision, and also challenging to answer closed-book questions even if relevant knowledge is retained. 
Some promising directions are found, including decoupling the knowledge memorizing process and the QA finetune process, forcing the model to recall relevant knowledge when question answering.", "doc_id": "cd9d6133b9e603e27f9be18e300c6a12", "publication_year": 2021, "sentences": ["recent work has investigated the interesting question using pre - trained language models ( plms ) as knowledge bases for answering open questions .", "however , existing work is limited in using small benchmarks with high test - train overlaps .", "we construct a new dataset of closed - book qa using squad , and investigate the performance of bart .", "experiments show that it is challenging for bart to remember training facts in high precision , and also challenging to answer closed - book questions even if relevant knowledge is retained .", "some promising directions are found , including decoupling the knowledge memorizing process and the qa finetune process , forcing the model to recall relevant knowledge when question answering ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pre", "-", "trained", "language", "models"], "offsets": [8, 9, 10, 11, 12]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "limited", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limited"], "offsets": [29]}, {"text": "small benchmarks with high test - train overlaps", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["small", "benchmarks", "with", "high", "test", "-", "train", "overlaps"], "offsets": [32, 33, 34, 35, 36, 37, 38, 39]}], "trigger": {"text": "limited", "tokens": ["limited"], "offsets": [29]}}, {"event_type": "MDS", "arguments": [{"text": "squad", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["squad"], "offsets": [52]}, {"text": "dataset of closed - book qa", "nugget_type": "DST", 
"argument_type": "TriedComponent", "tokens": ["dataset", "of", "closed", "-", "book", "qa"], "offsets": [45, 46, 47, 48, 49, 50]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [42]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [41]}, {"text": "performance of bart", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["performance", "of", "bart"], "offsets": [57, 58, 59]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [55]}}, {"event_type": "FAC", "arguments": [{"text": "bart", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["bart"], "offsets": [68]}, {"text": "in high precision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "high", "precision"], "offsets": [73, 74, 75]}, {"text": "remember", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["remember"], "offsets": [70]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [66]}}, {"event_type": "FAC", "arguments": [{"text": "bart", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["bart"], "offsets": [68]}, {"text": "closed - book questions", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["closed", "-", "book", "questions"], "offsets": [82, 83, 84, 85]}, {"text": "if relevant knowledge is retained", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["if", "relevant", "knowledge", "is", "retained"], "offsets": [87, 88, 89, 90, 91]}], "trigger": {"text": "challenging to answer", "tokens": ["challenging", "to", "answer"], "offsets": [79, 80, 81]}}, {"event_type": "FAC", "arguments": [{"text": "recall", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["recall"], "offsets": [115]}, {"text": "knowledge memorizing process", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["knowledge", "memorizing", "process"], "offsets": [102, 103, 104]}, 
{"text": "qa finetune process", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["qa", "finetune", "process"], "offsets": [107, 108, 109]}], "trigger": {"text": "decoupling", "tokens": ["decoupling"], "offsets": [100]}}, {"event_type": "PUR", "arguments": [{"text": "relevant knowledge", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["relevant", "knowledge"], "offsets": [116, 117]}], "trigger": {"text": "recall", "tokens": ["recall"], "offsets": [115]}}, {"event_type": "PUR", "arguments": [{"text": "training facts", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["training", "facts"], "offsets": [71, 72]}], "trigger": {"text": "remember", "tokens": ["remember"], "offsets": [70]}}], "document": ["recent", "work", "has", "investigated", "the", "interesting", "question", "using", "pre", "-", "trained", "language", "models", "(", "plms", ")", "as", "knowledge", "bases", "for", "answering", "open", "questions", ".", "however", ",", "existing", "work", "is", "limited", "in", "using", "small", "benchmarks", "with", "high", "test", "-", "train", "overlaps", ".", "we", "construct", "a", "new", "dataset", "of", "closed", "-", "book", "qa", "using", "squad", ",", "and", "investigate", "the", "performance", "of", "bart", ".", "experiments", "show", "that", "it", "is", "challenging", "for", "bart", "to", "remember", "training", "facts", "in", "high", "precision", ",", "and", "also", "challenging", "to", "answer", "closed", "-", "book", "questions", "even", "if", "relevant", "knowledge", "is", "retained", ".", "some", "promising", "directions", "are", "found", ",", "including", "decoupling", "the", "knowledge", "memorizing", "process", "and", "the", "qa", "finetune", "process", ",", "forcing", "the", "model", "to", "recall", "relevant", "knowledge", "when", "question", "answering", "."]}, {"venue": "ACL", "title": "Attentive Pooling with Learnable Norms for Text Representation", "abstract": "Pooling is an important technique for learning text 
representations in many neural NLP models. In conventional pooling methods such as average, max and attentive pooling, text representations are weighted summations of the L1 or L\u221e norm of input features. However, their pooling norms are always fixed and may not be optimal for learning accurate text representations in different tasks. In addition, in many popular pooling methods such as max and attentive pooling some features may be over-emphasized, while other useful ones are not fully exploited. In this paper, we propose an Attentive Pooling with Learnable Norms (APLN) approach for text representation. Different from existing pooling methods that use a fixed pooling norm, we propose to learn the norm in an end-to-end manner to automatically find the optimal ones for text representation in different tasks. In addition, we propose two methods to ensure the numerical stability of the model training. The first one is scale limiting, which re-scales the input to ensure non-negativity and alleviate the risk of exponential explosion. The second one is re-formulation, which decomposes the exponent operation to avoid computing the real-valued powers of the input and further accelerate the pooling operation. 
Experimental results on four benchmark datasets show that our approach can effectively improve the performance of attentive pooling.", "doc_id": "406cb10974637a2be59480b06d41a2fc", "publication_year": 2020, "sentences": ["pooling is an important technique for learning text representations in many neural nlp models .", "in conventional pooling methods such as average , max and attentive pooling , text representations are weighted summations of the l1 or l\u221e norm of input features .", "however , their pooling norms are always fixed and may not be optimal for learning accurate text representations in different tasks .", "in addition , in many popular pooling methods such as max and attentive pooling some features may be over - emphasized , while other useful ones are not fully exploited .", "in this paper , we propose an attentive pooling with learnable norms ( apln ) approach for text representation .", "different from existing pooling methods that use a fixed pooling norm , we propose to learn the norm in an end - to - end manner to automatically find the optimal ones for text representation in different tasks .", "in addition , we propose two methods to ensure the numerical stability of the model training .", "the first one is scale limiting , which re - scales the input to ensure non - negativity and alleviate the risk of exponential explosion .", "the second one is re - formulation , which decomposes the exponent operation to avoid computing the real - valued powers of the input and further accelerate the pooling operation .", "experimental results on four benchmark datasets show that our approach can effectively improve the performance of attentive pooling ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pooling", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pooling"], "offsets": [0]}], "trigger": {"text": "technique", "tokens": ["technique"], "offsets": [4]}}, {"event_type": "RWS", "arguments": [{"text": "text representations", 
"nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["text", "representations"], "offsets": [28, 29]}, {"text": "conventional pooling methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["conventional", "pooling", "methods"], "offsets": [16, 17, 18]}, {"text": "summations of the l1 or l\u221e norm of input features", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["summations", "of", "the", "l1", "or", "l\u221e", "norm", "of", "input", "features"], "offsets": [32, 33, 34, 35, 36, 37, 38, 39, 40, 41]}], "trigger": {"text": "weighted", "tokens": ["weighted"], "offsets": [31]}}, {"event_type": "RWF", "arguments": [{"text": "pooling norms", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["pooling", "norms"], "offsets": [46, 47]}], "trigger": {"text": "fixed", "tokens": ["fixed"], "offsets": [50]}}, {"event_type": "RWF", "arguments": [{"text": "pooling norms", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["pooling", "norms"], "offsets": [46, 47]}, {"text": "learning", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learning"], "offsets": [57]}], "trigger": {"text": "not be optimal", "tokens": ["not", "be", "optimal"], "offsets": [53, 54, 55]}}, {"event_type": "PUR", "arguments": [{"text": "accurate text representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["accurate", "text", "representations"], "offsets": [58, 59, 60]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [57]}}, {"event_type": "RWF", "arguments": [{"text": "popular pooling methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["popular", "pooling", "methods"], "offsets": [70, 71, 72]}, {"text": "some features", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["some", "features"], "offsets": [79, 80]}], "trigger": {"text": "over - emphasized", "tokens": ["over", "-", "emphasized"], "offsets": [83, 84, 85]}}, {"event_type": "RWF", "arguments": [{"text": 
"popular pooling methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["popular", "pooling", "methods"], "offsets": [70, 71, 72]}, {"text": "other useful ones", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["other", "useful", "ones"], "offsets": [88, 89, 90]}], "trigger": {"text": "not fully exploited", "tokens": ["not", "fully", "exploited"], "offsets": [92, 93, 94]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [100]}, {"text": "attentive pooling with learnable norms ( apln ) approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["attentive", "pooling", "with", "learnable", "norms", "approach"], "offsets": [103, 104, 105, 106, 107, 111]}, {"text": "text representation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "representation"], "offsets": [113, 114]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [101]}}, {"event_type": "RWS", "arguments": [{"text": "existing pooling methods", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["existing", "pooling", "methods"], "offsets": [118, 119, 120]}, {"text": "fixed pooling norm", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["fixed", "pooling", "norm"], "offsets": [124, 125, 126]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [122]}}, {"event_type": "MDS", "arguments": [{"text": "find", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["find"], "offsets": [144]}, {"text": "norm", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["norm"], "offsets": [133]}, {"text": "in an end - to - end manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "end", "-", "to", "-", "end", "manner"], "offsets": [134, 135, 136, 137, 138, 139, 140, 141]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [131]}}, {"event_type": "PUR", "arguments": 
[{"text": "optimal ones for text representation", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["optimal", "ones", "for", "text", "representation"], "offsets": [146, 147, 148, 149, 150]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [144]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [158]}, {"text": "two methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "methods"], "offsets": [160, 161]}, {"text": "ensure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["ensure"], "offsets": [163]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [159]}}, {"event_type": "PUR", "arguments": [{"text": "numerical stability", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["numerical", "stability"], "offsets": [165, 166]}], "trigger": {"text": "ensure", "tokens": ["ensure"], "offsets": [163]}}, {"event_type": "MDS", "arguments": [{"text": "ensure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["ensure"], "offsets": [186]}, {"text": "scale limiting", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["scale", "limiting"], "offsets": [176, 177]}, {"text": "input", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["input"], "offsets": [184]}], "trigger": {"text": "re - scales", "tokens": ["re", "-", "scales"], "offsets": [180, 181, 182]}}, {"event_type": "PUR", "arguments": [{"text": "non - negativity and alleviate the risk of exponential explosion", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["non", "-", "negativity", "and", "alleviate", "the", "risk", "of", "exponential", "explosion"], "offsets": [187, 188, 189, 190, 191, 192, 193, 194, 195, 196]}], "trigger": {"text": "ensure", "tokens": ["ensure"], "offsets": [186]}}, {"event_type": "MDS", "arguments": [{"text": "avoid", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["avoid"], 
"offsets": [212]}, {"text": "re - formulation", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["re", "-", "formulation"], "offsets": [202, 203, 204]}, {"text": "exponent operation", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["exponent", "operation"], "offsets": [209, 210]}], "trigger": {"text": "decomposes", "tokens": ["decomposes"], "offsets": [207]}}, {"event_type": "PUR", "arguments": [{"text": "computing the real - valued powers of the input", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["computing", "the", "real", "-", "valued", "powers", "of", "the", "input"], "offsets": [213, 214, 215, 216, 217, 218, 219, 220, 221]}], "trigger": {"text": "avoid", "tokens": ["avoid"], "offsets": [212]}}, {"event_type": "MDS", "arguments": [{"text": "re - formulation", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["re", "-", "formulation"], "offsets": [202, 203, 204]}, {"text": "pooling operation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["pooling", "operation"], "offsets": [226, 227]}], "trigger": {"text": "accelerate", "tokens": ["accelerate"], "offsets": [224]}}, {"event_type": "FAC", "arguments": [{"text": "performance of attentive pooling", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["performance", "of", "attentive", "pooling"], "offsets": [243, 244, 245, 246]}, {"text": "effectively", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["effectively"], "offsets": [240]}, {"text": "four benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["four", "benchmark", "datasets"], "offsets": [232, 233, 234]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [241]}}], "document": ["pooling", "is", "an", "important", "technique", "for", "learning", "text", "representations", "in", "many", "neural", "nlp", "models", ".", "in", "conventional", "pooling", "methods", "such", "as", "average", ",", "max", "and", 
"attentive", "pooling", ",", "text", "representations", "are", "weighted", "summations", "of", "the", "l1", "or", "l\u221e", "norm", "of", "input", "features", ".", "however", ",", "their", "pooling", "norms", "are", "always", "fixed", "and", "may", "not", "be", "optimal", "for", "learning", "accurate", "text", "representations", "in", "different", "tasks", ".", "in", "addition", ",", "in", "many", "popular", "pooling", "methods", "such", "as", "max", "and", "attentive", "pooling", "some", "features", "may", "be", "over", "-", "emphasized", ",", "while", "other", "useful", "ones", "are", "not", "fully", "exploited", ".", "in", "this", "paper", ",", "we", "propose", "an", "attentive", "pooling", "with", "learnable", "norms", "(", "apln", ")", "approach", "for", "text", "representation", ".", "different", "from", "existing", "pooling", "methods", "that", "use", "a", "fixed", "pooling", "norm", ",", "we", "propose", "to", "learn", "the", "norm", "in", "an", "end", "-", "to", "-", "end", "manner", "to", "automatically", "find", "the", "optimal", "ones", "for", "text", "representation", "in", "different", "tasks", ".", "in", "addition", ",", "we", "propose", "two", "methods", "to", "ensure", "the", "numerical", "stability", "of", "the", "model", "training", ".", "the", "first", "one", "is", "scale", "limiting", ",", "which", "re", "-", "scales", "the", "input", "to", "ensure", "non", "-", "negativity", "and", "alleviate", "the", "risk", "of", "exponential", "explosion", ".", "the", "second", "one", "is", "re", "-", "formulation", ",", "which", "decomposes", "the", "exponent", "operation", "to", "avoid", "computing", "the", "real", "-", "valued", "powers", "of", "the", "input", "and", "further", "accelerate", "the", "pooling", "operation", ".", "experimental", "results", "on", "four", "benchmark", "datasets", "show", "that", "our", "approach", "can", "effectively", "improve", "the", "performance", "of", "attentive", "pooling", "."]}, {"venue": "ACL", "title": "Multi-Head 
Highly Parallelized LSTM Decoder for Neural Machine Translation", "abstract": "One of the reasons Transformer translation models are popular is that self-attention networks for context modelling can be easily parallelized at sequence level. However, the computational complexity of a self-attention network is O(n2), increasing quadratically with sequence length. By contrast, the complexity of LSTM-based approaches is only O(n). In practice, however, LSTMs are much slower to train than self-attention networks as they cannot be parallelized at sequence level: to model context, the current LSTM state relies on the full LSTM computation of the preceding state. This has to be computed n times for a sequence of length n. The linear transformations involved in the LSTM gate and state computations are the major cost factors in this. To enable sequence-level parallelization of LSTMs, we approximate full LSTM context modelling by computing hidden states and gates with the current input and a simple bag-of-words representation of the preceding tokens context. This allows us to compute each input step efficiently in parallel, avoiding the formerly costly sequential linear transformations. We then connect the outputs of each parallel step with computationally cheap element-wise computations. We call this the Highly Parallelized LSTM. To further constrain the number of LSTM parameters, we compute several small HPLSTMs in parallel like multi-head attention in the Transformer. 
The experiments show that our MHPLSTM decoder achieves significant BLEU improvements, while being even slightly faster than the self-attention network in training, and much faster than the standard LSTM.", "doc_id": "afdfdb6250f2605aad0cde320db1875a", "publication_year": 2021, "sentences": ["one of the reasons transformer translation models are popular is that self - attention networks for context modelling can be easily parallelized at sequence level .", "however , the computational complexity of a self - attention network is o ( n2 ) , increasing quadratically with sequence length .", "by contrast , the complexity of lstm - based approaches is only o ( n ) .", "in practice , however , lstms are much slower to train than self - attention networks as they cannot be parallelized at sequence level : to model context , the current lstm state relies on the full lstm computation of the preceding state .", "this has to be computed n times for a sequence of length n .", "the linear transformations involved in the lstm gate and state computations are the major cost factors in this .", "to enable sequence - level parallelization of lstms , we approximate full lstm context modelling by computing hidden states and gates with the current input and a simple bag - of - words representation of the preceding tokens context .", "this allows us to compute each input step efficiently in parallel , avoiding the formerly costly sequential linear transformations .", "we then connect the outputs of each parallel step with computationally cheap element - wise computations .", "we call this the highly parallelized lstm .", "to further constrain the number of lstm parameters , we compute several small hplstms in parallel like multi - head attention in the transformer .", "the experiments show that our mhplstm decoder achieves significant bleu improvements , while being even slightly faster than the self - attention network in training , and much faster than the standard lstm ."], "events": 
[{"event_type": "ITT", "arguments": [{"text": "transformer translation models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["transformer", "translation", "models"], "offsets": [4, 5, 6]}], "trigger": {"text": "popular", "tokens": ["popular"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "lstms", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["lstms"], "offsets": [71]}, {"text": "much slower", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["much", "slower"], "offsets": [73, 74]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [76]}}, {"event_type": "RWF", "arguments": [{"text": "at sequence level", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "sequence", "level"], "offsets": [87, 88, 89]}, {"text": "cannot be parallelized", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cannot", "be", "parallelized"], "offsets": [84, 85, 86]}], "trigger": {"text": "cannot be parallelized", "tokens": ["cannot", "be", "parallelized"], "offsets": [84, 85, 86]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [152]}, {"text": "full lstm context modelling", "nugget_type": "APP", "argument_type": "Content", "tokens": ["full", "lstm", "context", "modelling"], "offsets": [154, 155, 156, 157]}, {"text": "enable", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enable"], "offsets": [144]}], "trigger": {"text": "approximate", "tokens": ["approximate"], "offsets": [153]}}, {"event_type": "PUR", "arguments": [{"text": "sequence - level parallelization of lstms", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["sequence", "-", "level", "parallelization", "of", "lstms"], "offsets": [145, 146, 147, 148, 149, 150]}], "trigger": {"text": "enable", "tokens": ["enable"], "offsets": [144]}}, {"event_type": "MDS", "arguments": [{"text": "current input", "nugget_type": "FEA", 
"argument_type": "BaseComponent", "tokens": ["current", "input"], "offsets": [166, 167]}, {"text": "bag - of - words representation", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["bag", "-", "of", "-", "words", "representation"], "offsets": [171, 172, 173, 174, 175, 176]}, {"text": "hidden states", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["hidden", "states"], "offsets": [160, 161]}, {"text": "hidden gates", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["hidden", "gates"], "offsets": [160, 163]}], "trigger": {"text": "computing", "tokens": ["computing"], "offsets": [159]}}, {"event_type": "MDS", "arguments": [{"text": "outputs of each parallel step", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["outputs", "of", "each", "parallel", "step"], "offsets": [207, 208, 209, 210, 211]}, {"text": "computationally cheap element - wise computations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["computationally", "cheap", "element", "-", "wise", "computations"], "offsets": [213, 214, 215, 216, 217, 218]}], "trigger": {"text": "connect", "tokens": ["connect"], "offsets": [205]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [237]}, {"text": "highly parallelized lstm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["highly", "parallelized", "lstm"], "offsets": [224, 225, 226]}, {"text": "in parallel", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "parallel"], "offsets": [242, 243]}, {"text": "constrain", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["constrain"], "offsets": [230]}], "trigger": {"text": "compute", "tokens": ["compute"], "offsets": [238]}}, {"event_type": "PUR", "arguments": [{"text": "number of lstm parameters", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["number", "of", "lstm", "parameters"], 
"offsets": [232, 233, 234, 235]}], "trigger": {"text": "constrain", "tokens": ["constrain"], "offsets": [230]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [260]}, {"text": "slightly faster", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["slightly", "faster"], "offsets": [268, 269]}, {"text": "much faster", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["much", "faster"], "offsets": [280, 281]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [255]}}, {"event_type": "FAC", "arguments": [{"text": "mhplstm decoder", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["mhplstm", "decoder"], "offsets": [258, 259]}, {"text": "bleu improvements", "nugget_type": "STR", "argument_type": "Object", "tokens": ["bleu", "improvements"], "offsets": [262, 263]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [260]}}, {"event_type": "CMP", "arguments": [{"text": "mhplstm decoder", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["mhplstm", "decoder"], "offsets": [258, 259]}, {"text": "self - attention network", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["self", "-", "attention", "network"], "offsets": [272, 273, 274, 275]}, {"text": "in training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "training"], "offsets": [276, 277]}], "trigger": {"text": "slightly faster", "tokens": ["slightly", "faster"], "offsets": [268, 269]}}, {"event_type": "CMP", "arguments": [{"text": "mhplstm decoder", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["mhplstm", "decoder"], "offsets": [258, 259]}, {"text": "standard lstm", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["standard", "lstm"], "offsets": [284, 285]}], "trigger": {"text": "much faster", "tokens": ["much", "faster"], "offsets": [280, 281]}}, {"event_type": "RWF", "arguments": [{"text": 
"computational complexity of a self - attention network", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["computational", "complexity", "of", "a", "self", "-", "attention", "network"], "offsets": [29, 30, 31, 32, 33, 34, 35, 36]}, {"text": "quadratically", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["quadratically"], "offsets": [44]}, {"text": "with sequence length", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "sequence", "length"], "offsets": [45, 46, 47]}], "trigger": {"text": "increasing", "tokens": ["increasing"], "offsets": [43]}}, {"event_type": "RWF", "arguments": [{"text": "linear transformations", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["linear", "transformations"], "offsets": [125, 126]}], "trigger": {"text": "major cost factors", "tokens": ["major", "cost", "factors"], "offsets": [137, 138, 139]}}], "document": ["one", "of", "the", "reasons", "transformer", "translation", "models", "are", "popular", "is", "that", "self", "-", "attention", "networks", "for", "context", "modelling", "can", "be", "easily", "parallelized", "at", "sequence", "level", ".", "however", ",", "the", "computational", "complexity", "of", "a", "self", "-", "attention", "network", "is", "o", "(", "n2", ")", ",", "increasing", "quadratically", "with", "sequence", "length", ".", "by", "contrast", ",", "the", "complexity", "of", "lstm", "-", "based", "approaches", "is", "only", "o", "(", "n", ")", ".", "in", "practice", ",", "however", ",", "lstms", "are", "much", "slower", "to", "train", "than", "self", "-", "attention", "networks", "as", "they", "cannot", "be", "parallelized", "at", "sequence", "level", ":", "to", "model", "context", ",", "the", "current", "lstm", "state", "relies", "on", "the", "full", "lstm", "computation", "of", "the", "preceding", "state", ".", "this", "has", "to", "be", "computed", "n", "times", "for", "a", "sequence", "of", "length", "n", ".", "the", "linear", "transformations", 
"involved", "in", "the", "lstm", "gate", "and", "state", "computations", "are", "the", "major", "cost", "factors", "in", "this", ".", "to", "enable", "sequence", "-", "level", "parallelization", "of", "lstms", ",", "we", "approximate", "full", "lstm", "context", "modelling", "by", "computing", "hidden", "states", "and", "gates", "with", "the", "current", "input", "and", "a", "simple", "bag", "-", "of", "-", "words", "representation", "of", "the", "preceding", "tokens", "context", ".", "this", "allows", "us", "to", "compute", "each", "input", "step", "efficiently", "in", "parallel", ",", "avoiding", "the", "formerly", "costly", "sequential", "linear", "transformations", ".", "we", "then", "connect", "the", "outputs", "of", "each", "parallel", "step", "with", "computationally", "cheap", "element", "-", "wise", "computations", ".", "we", "call", "this", "the", "highly", "parallelized", "lstm", ".", "to", "further", "constrain", "the", "number", "of", "lstm", "parameters", ",", "we", "compute", "several", "small", "hplstms", "in", "parallel", "like", "multi", "-", "head", "attention", "in", "the", "transformer", ".", "the", "experiments", "show", "that", "our", "mhplstm", "decoder", "achieves", "significant", "bleu", "improvements", ",", "while", "being", "even", "slightly", "faster", "than", "the", "self", "-", "attention", "network", "in", "training", ",", "and", "much", "faster", "than", "the", "standard", "lstm", "."]}, {"venue": "ACL", "title": "Treebank Embedding Vectors for Out-of-Domain Dependency Parsing", "abstract": "A recent advance in monolingual dependency parsing is the idea of a treebank embedding vector, which allows all treebanks for a particular language to be used as training data while at the same time allowing the model to prefer training data from one treebank over others and to select the preferred treebank at test time. 
We build on this idea by 1) introducing a method to predict a treebank vector for sentences that do not come from a treebank used in training, and 2) exploring what happens when we move away from predefined treebank embedding vectors during test time and instead devise tailored interpolations. We show that 1) there are interpolated vectors that are superior to the predefined ones, and 2) treebank vectors can be predicted with sufficient accuracy, for nine out of ten test languages, to match the performance of an oracle approach that knows the most suitable predefined treebank embedding for the test set.", "doc_id": "e2b21a669a9008ad6fd140c01a97c0fe", "publication_year": 2020, "sentences": ["a recent advance in monolingual dependency parsing is the idea of a treebank embedding vector , which allows all treebanks for a particular language to be used as training data while at the same time allowing the model to prefer training data from one treebank over others and to select the preferred treebank at test time .", "we build on this idea by 1 ) introducing a method to predict a treebank vector for sentences that do not come from a treebank used in training , and 2 ) exploring what happens when we move away from predefined treebank embedding vectors during test time and instead devise tailored interpolations .", "we show that 1 ) there are interpolated vectors that are superior to the predefined ones , and 2 ) treebank vectors can be predicted with sufficient accuracy , for nine out of ten test languages , to match the performance of an oracle approach that knows the most suitable predefined treebank embedding for the test set ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [57]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [67]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": 
["predict"], "offsets": [69]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [65]}}, {"event_type": "PUR", "arguments": [{"text": "treebank vector for sentences", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["treebank", "vector", "for", "sentences"], "offsets": [71, 72, 73, 74]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [69]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [110]}, {"text": "superior", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["superior"], "offsets": [121]}, {"text": "predicted", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["predicted"], "offsets": [134]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [111]}}, {"event_type": "FAC", "arguments": [{"text": "interpolated vectors", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["interpolated", "vectors"], "offsets": [117, 118]}, {"text": "predefined ones", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["predefined", "vectors"], "offsets": [124, 118]}], "trigger": {"text": "superior", "tokens": ["superior"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "match", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["match"], "offsets": [148]}, {"text": "with sufficient accuracy", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "sufficient", "accuracy"], "offsets": [135, 136, 137]}, {"text": "treebank vectors", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["treebank", "vectors"], "offsets": [130, 131]}, {"text": "nine out of ten test languages", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["nine", "out", "of", "ten", "test", "languages"], "offsets": [140, 141, 142, 143, 144, 145]}], "trigger": {"text": "predicted", "tokens": ["predicted"], "offsets": [134]}}, {"event_type": "PUR", "arguments": [{"text": 
"performance of an oracle approach", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance", "of", "an", "oracle", "approach"], "offsets": [150, 151, 152, 153, 154]}], "trigger": {"text": "match", "tokens": ["match"], "offsets": [148]}}, {"event_type": "ITT", "arguments": [{"text": "monolingual dependency parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["monolingual", "dependency", "parsing"], "offsets": [4, 5, 6]}], "trigger": {"text": "advance", "tokens": ["advance"], "offsets": [2]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [57]}, {"text": "predefined treebank embedding vectors", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["predefined", "treebank", "embedding", "vectors"], "offsets": [97, 98, 99, 100]}, {"text": "during test time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "test", "time"], "offsets": [101, 102, 103]}, {"text": "instead devise tailored interpolations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["instead", "devise", "tailored", "interpolations"], "offsets": [105, 106, 107, 108]}], "trigger": {"text": "exploring", "tokens": ["exploring"], "offsets": [89]}}], "document": ["a", "recent", "advance", "in", "monolingual", "dependency", "parsing", "is", "the", "idea", "of", "a", "treebank", "embedding", "vector", ",", "which", "allows", "all", "treebanks", "for", "a", "particular", "language", "to", "be", "used", "as", "training", "data", "while", "at", "the", "same", "time", "allowing", "the", "model", "to", "prefer", "training", "data", "from", "one", "treebank", "over", "others", "and", "to", "select", "the", "preferred", "treebank", "at", "test", "time", ".", "we", "build", "on", "this", "idea", "by", "1", ")", "introducing", "a", "method", "to", "predict", "a", "treebank", "vector", "for", "sentences", "that", "do", "not", "come", "from", "a", "treebank", 
"used", "in", "training", ",", "and", "2", ")", "exploring", "what", "happens", "when", "we", "move", "away", "from", "predefined", "treebank", "embedding", "vectors", "during", "test", "time", "and", "instead", "devise", "tailored", "interpolations", ".", "we", "show", "that", "1", ")", "there", "are", "interpolated", "vectors", "that", "are", "superior", "to", "the", "predefined", "ones", ",", "and", "2", ")", "treebank", "vectors", "can", "be", "predicted", "with", "sufficient", "accuracy", ",", "for", "nine", "out", "of", "ten", "test", "languages", ",", "to", "match", "the", "performance", "of", "an", "oracle", "approach", "that", "knows", "the", "most", "suitable", "predefined", "treebank", "embedding", "for", "the", "test", "set", "."]}, {"venue": "ACL", "title": "Keywords and Instances: A Hierarchical Contrastive Learning Framework Unifying Hybrid Granularities for Text Generation", "abstract": "Contrastive learning has achieved impressive success in generation tasks to militate the \u201cexposure bias\u201d problem and discriminatively exploit the different quality of references. Existing works mostly focus on contrastive learning on the instance-level without discriminating the contribution of each word, while keywords are the gist of the text and dominant the constrained mapping relationships. Hence, in this work, we propose a hierarchical contrastive learning mechanism, which can unify hybrid granularities semantic meaning in the input text. Concretely, we first propose a keyword graph via contrastive correlations of positive-negative pairs to iteratively polish the keyword representations. Then, we construct intra-contrasts within instance-level and keyword-level, where we assume words are sampled nodes from a sentence distribution. 
Finally, to bridge the gap between independent contrast levels and tackle the common contrast vanishing problem, we propose an inter-contrast mechanism that measures the discrepancy between contrastive keyword nodes respectively to the instance distribution. Experiments demonstrate that our model outperforms competitive baselines on paraphrasing, dialogue generation, and storytelling tasks.", "doc_id": "c4babeaa021e41298b027cce00b90532", "publication_year": 2022, "sentences": ["contrastive learning has achieved impressive success in generation tasks to militate the \u201c exposure bias \u201d problem and discriminatively exploit the different quality of references .", "existing works mostly focus on contrastive learning on the instance - level without discriminating the contribution of each word , while keywords are the gist of the text and dominant the constrained mapping relationships .", "hence , in this work , we propose a hierarchical contrastive learning mechanism , which can unify hybrid granularities semantic meaning in the input text .", "concretely , we first propose a keyword graph via contrastive correlations of positive - negative pairs to iteratively polish the keyword representations .", "then , we construct intra - contrasts within instance - level and keyword - level , where we assume words are sampled nodes from a sentence distribution .", "finally , to bridge the gap between independent contrast levels and tackle the common contrast vanishing problem , we propose an inter - contrast mechanism that measures the discrepancy between contrastive keyword nodes respectively to the instance distribution .", "experiments demonstrate that our model outperforms competitive baselines on paraphrasing , dialogue generation , and storytelling tasks ."], "events": [{"event_type": "ITT", "arguments": [{"text": "contrastive learning", "nugget_type": "APP", "argument_type": "Target", "tokens": ["contrastive", "learning"], "offsets": [0, 1]}], "trigger": {"text": 
"achieved", "tokens": ["achieved"], "offsets": [3]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [67]}, {"text": "hierarchical contrastive learning mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hierarchical", "contrastive", "learning", "mechanism"], "offsets": [70, 71, 72, 73]}, {"text": "unify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["unify"], "offsets": [77]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [68]}}, {"event_type": "PUR", "arguments": [{"text": "hybrid granularities semantic meaning", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["hybrid", "granularities", "semantic", "meaning"], "offsets": [78, 79, 80, 81]}, {"text": "in the input text", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "input", "text"], "offsets": [82, 83, 84, 85]}], "trigger": {"text": "unify", "tokens": ["unify"], "offsets": [77]}}, {"event_type": "MDS", "arguments": [{"text": "polish", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["polish"], "offsets": [105]}, {"text": "contrastive correlations of positive - negative pairs", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["contrastive", "correlations", "of", "positive", "-", "negative", "pairs"], "offsets": [96, 97, 98, 99, 100, 101, 102]}, {"text": "keyword graph", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["keyword", "graph"], "offsets": [93, 94]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [91]}}, {"event_type": "PUR", "arguments": [{"text": "keyword representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["keyword", "representations"], "offsets": [107, 108]}, {"text": "iteratively", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["iteratively"], "offsets": [104]}], "trigger": {"text": "polish", "tokens": ["polish"], 
"offsets": [105]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [112]}, {"text": "intra - contrasts", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["intra", "-", "contrasts"], "offsets": [114, 115, 116]}, {"text": "within instance - level and keyword - level", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["within", "instance", "-", "level", "and", "keyword", "-", "level"], "offsets": [117, 118, 119, 120, 121, 122, 123, 124]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [113]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [156]}, {"text": "inter - contrast mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["inter", "-", "contrast", "mechanism"], "offsets": [159, 160, 161, 162]}, {"text": "bridge", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["bridge"], "offsets": [141]}, {"text": "tackle", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["tackle"], "offsets": [149]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [157]}}, {"event_type": "PUR", "arguments": [{"text": "gap between independent contrast levels", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["gap", "between", "independent", "contrast", "levels"], "offsets": [143, 144, 145, 146, 147]}], "trigger": {"text": "bridge", "tokens": ["bridge"], "offsets": [141]}}, {"event_type": "PUR", "arguments": [{"text": "common contrast vanishing problem", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["common", "contrast", "vanishing", "problem"], "offsets": [151, 152, 153, 154]}], "trigger": {"text": "tackle", "tokens": ["tackle"], "offsets": [149]}}, {"event_type": "WKS", "arguments": [{"text": "discrepancy between contrastive keyword nodes", "nugget_type": "TAK", "argument_type": "Content", "tokens": 
["discrepancy", "between", "contrastive", "keyword", "nodes"], "offsets": [166, 167, 168, 169, 170]}, {"text": "respectively to the instance distribution", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["respectively", "to", "the", "instance", "distribution"], "offsets": [171, 172, 173, 174, 175]}], "trigger": {"text": "measures", "tokens": ["measures"], "offsets": [164]}}, {"event_type": "CMP", "arguments": [{"text": "competitive baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["competitive", "baselines"], "offsets": [183, 184]}, {"text": "paraphrasing", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["paraphrasing"], "offsets": [186]}, {"text": "dialogue generation", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["dialogue", "generation"], "offsets": [188, 189]}, {"text": "storytelling", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["storytelling"], "offsets": [192]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [182]}}, {"event_type": "RWS", "arguments": [{"text": "existing works", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "works"], "offsets": [26, 27]}, {"text": "contrastive learning on the instance - level", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["contrastive", "learning", "on", "the", "instance", "-", "level"], "offsets": [31, 32, 33, 34, 35, 36, 37]}], "trigger": {"text": "focus", "tokens": ["focus"], "offsets": [29]}}, {"event_type": "RWF", "arguments": [{"text": "without", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["without"], "offsets": [38]}, {"text": "discriminating the contribution of each word", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["discriminating", "the", "contribution", "of", "each", "word"], "offsets": [39, 40, 41, 42, 43, 44]}], "trigger": {"text": "without", "tokens": ["without"], "offsets": [38]}}], "document": ["contrastive", "learning", 
"has", "achieved", "impressive", "success", "in", "generation", "tasks", "to", "militate", "the", "\u201c", "exposure", "bias", "\u201d", "problem", "and", "discriminatively", "exploit", "the", "different", "quality", "of", "references", ".", "existing", "works", "mostly", "focus", "on", "contrastive", "learning", "on", "the", "instance", "-", "level", "without", "discriminating", "the", "contribution", "of", "each", "word", ",", "while", "keywords", "are", "the", "gist", "of", "the", "text", "and", "dominant", "the", "constrained", "mapping", "relationships", ".", "hence", ",", "in", "this", "work", ",", "we", "propose", "a", "hierarchical", "contrastive", "learning", "mechanism", ",", "which", "can", "unify", "hybrid", "granularities", "semantic", "meaning", "in", "the", "input", "text", ".", "concretely", ",", "we", "first", "propose", "a", "keyword", "graph", "via", "contrastive", "correlations", "of", "positive", "-", "negative", "pairs", "to", "iteratively", "polish", "the", "keyword", "representations", ".", "then", ",", "we", "construct", "intra", "-", "contrasts", "within", "instance", "-", "level", "and", "keyword", "-", "level", ",", "where", "we", "assume", "words", "are", "sampled", "nodes", "from", "a", "sentence", "distribution", ".", "finally", ",", "to", "bridge", "the", "gap", "between", "independent", "contrast", "levels", "and", "tackle", "the", "common", "contrast", "vanishing", "problem", ",", "we", "propose", "an", "inter", "-", "contrast", "mechanism", "that", "measures", "the", "discrepancy", "between", "contrastive", "keyword", "nodes", "respectively", "to", "the", "instance", "distribution", ".", "experiments", "demonstrate", "that", "our", "model", "outperforms", "competitive", "baselines", "on", "paraphrasing", ",", "dialogue", "generation", ",", "and", "storytelling", "tasks", "."]}, {"venue": "ACL", "title": "Reliability Testing for Natural Language Processing Systems", "abstract": "Questions of fairness, robustness, and transparency 
are paramount to address before deploying NLP systems. Central to these concerns is the question of reliability: Can NLP systems reliably treat different demographics fairly and function correctly in diverse and noisy environments? To address this, we argue for the need for reliability testing and contextualize it among existing work on improving accountability. We show how adversarial attacks can be reframed for this goal, via a framework for developing reliability tests. We argue that reliability testing \u2014 with an emphasis on interdisciplinary collaboration \u2014 will enable rigorous and targeted testing, and aid in the enactment and enforcement of industry standards.", "doc_id": "70631c934ff3363cf499568c86cb7d71", "publication_year": 2021, "sentences": ["questions of fairness , robustness , and transparency are paramount to address before deploying nlp systems .", "central to these concerns is the question of reliability : can nlp systems reliably treat different demographics fairly and function correctly in diverse and noisy environments ?", "to address this , we argue for the need for reliability testing and contextualize it among existing work on improving accountability .", "we show how adversarial attacks can be reframed for this goal , via a framework for developing reliability tests .", "we argue that reliability testing \u2014 with an emphasis on interdisciplinary collaboration \u2014 will enable rigorous and targeted testing , and aid in the enactment and enforcement of industry standards ."], "events": [{"event_type": "ITT", "arguments": [{"text": "nlp systems", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "systems"], "offsets": [14, 15]}], "trigger": {"text": "deploying", "tokens": ["deploying"], "offsets": [13]}}, {"event_type": "WKS", "arguments": [{"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [45]}, {"text": "we", "nugget_type": "OG", "argument_type": 
"Researcher", "tokens": ["we"], "offsets": [48]}, {"text": "need for reliability testing", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["need", "for", "reliability", "testing"], "offsets": [52, 53, 54, 55]}], "trigger": {"text": "argue", "tokens": ["argue"], "offsets": [49]}}, {"event_type": "PUR", "arguments": [{"text": "question of reliability", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["question", "of", "reliability"], "offsets": [23, 24, 25]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [45]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [48]}, {"text": "reliability testing", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["reliability", "testing"], "offsets": [54, 55]}, {"text": "existing work on improving accountability", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["existing", "work", "on", "improving", "accountability"], "offsets": [60, 61, 62, 63, 64]}], "trigger": {"text": "contextualize", "tokens": ["contextualize"], "offsets": [57]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [66]}, {"text": "framework for developing reliability tests", "nugget_type": "APP", "argument_type": "Content", "tokens": ["framework", "for", "developing", "reliability", "tests"], "offsets": [80, 81, 82, 83, 84]}, {"text": "reframed", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reframed"], "offsets": [73]}], "trigger": {"text": "via", "tokens": ["via"], "offsets": [78]}}, {"event_type": "PUR", "arguments": [{"text": "adversarial attacks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["adversarial", "attacks"], "offsets": [69, 70]}], "trigger": {"text": "reframed", "tokens": ["reframed"], "offsets": [73]}}], "document": ["questions", "of", "fairness", ",", "robustness", ",", "and", 
"transparency", "are", "paramount", "to", "address", "before", "deploying", "nlp", "systems", ".", "central", "to", "these", "concerns", "is", "the", "question", "of", "reliability", ":", "can", "nlp", "systems", "reliably", "treat", "different", "demographics", "fairly", "and", "function", "correctly", "in", "diverse", "and", "noisy", "environments", "?", "to", "address", "this", ",", "we", "argue", "for", "the", "need", "for", "reliability", "testing", "and", "contextualize", "it", "among", "existing", "work", "on", "improving", "accountability", ".", "we", "show", "how", "adversarial", "attacks", "can", "be", "reframed", "for", "this", "goal", ",", "via", "a", "framework", "for", "developing", "reliability", "tests", ".", "we", "argue", "that", "reliability", "testing", "\u2014", "with", "an", "emphasis", "on", "interdisciplinary", "collaboration", "\u2014", "will", "enable", "rigorous", "and", "targeted", "testing", ",", "and", "aid", "in", "the", "enactment", "and", "enforcement", "of", "industry", "standards", "."]}, {"venue": "ACL", "title": "Metaphor Generation with Conceptual Mappings", "abstract": "Generating metaphors is a difficult task as it requires understanding nuanced relationships between abstract concepts. In this paper, we aim to generate a metaphoric sentence given a literal expression by replacing relevant verbs. Guided by conceptual metaphor theory, we propose to control the generation process by encoding conceptual mappings between cognitive domains to generate meaningful metaphoric expressions. To achieve this, we develop two methods: 1) using FrameNet-based embeddings to learn mappings between domains and applying them at the lexical level (CM-Lex), and 2) deriving source/target pairs to train a controlled seq-to-seq generation model (CM-BART). We assess our methods through automatic and human evaluation for basic metaphoricity and conceptual metaphor presence. 
We show that the unsupervised CM-Lex model is competitive with recent deep learning metaphor generation systems, and CM-BART outperforms all other models both in automatic and human evaluations.", "doc_id": "db9e1947618a38ce8f364739ce078d5b", "publication_year": 2021, "sentences": ["generating metaphors is a difficult task as it requires understanding nuanced relationships between abstract concepts .", "in this paper , we aim to generate a metaphoric sentence given a literal expression by replacing relevant verbs .", "guided by conceptual metaphor theory , we propose to control the generation process by encoding conceptual mappings between cognitive domains to generate meaningful metaphoric expressions .", "to achieve this , we develop two methods : 1 ) using framenet - based embeddings to learn mappings between domains and applying them at the lexical level ( cm - lex ) , and 2 ) deriving source / target pairs to train a controlled seq - to - seq generation model ( cm - bart ) .", "we assess our methods through automatic and human evaluation for basic metaphoricity and conceptual metaphor presence .", "we show that the unsupervised cm - lex model is competitive with recent deep learning metaphor generation systems , and cm - bart outperforms all other models both in automatic and human evaluations ."], "events": [{"event_type": "ITT", "arguments": [{"text": "metaphors", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["metaphors"], "offsets": [1]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [0]}}, {"event_type": "PUR", "arguments": [{"text": "metaphoric sentence", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["metaphoric", "sentence"], "offsets": [25, 26]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [23]}}, {"event_type": "MDS", "arguments": [{"text": "guided by conceptual metaphor theory", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["guided", "by", "conceptual", 
"metaphor", "theory"], "offsets": [36, 37, 38, 39, 40]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [57]}, {"text": "conceptual mappings", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["conceptual", "mappings"], "offsets": [51, 52]}, {"text": "generation process", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["generation", "process"], "offsets": [47, 48]}], "trigger": {"text": "control", "tokens": ["control"], "offsets": [45]}}, {"event_type": "PUR", "arguments": [{"text": "meaningful metaphoric expressions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["meaningful", "metaphoric", "expressions"], "offsets": [58, 59, 60]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [57]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [66]}, {"text": "two methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "methods"], "offsets": [68, 69]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "framenet - based embeddings", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["framenet", "-", "based", "embeddings"], "offsets": [74, 75, 76, 77]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [79]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [73]}}, {"event_type": "PUR", "arguments": [{"text": "mappings between domains", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["mappings", "between", "domains"], "offsets": [80, 81, 82]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [79]}}, {"event_type": "WKS", "arguments": [{"text": "mappings", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["mappings"], "offsets": [80]}, {"text": "at the lexical level", 
"nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "the", "lexical", "level"], "offsets": [86, 87, 88, 89]}], "trigger": {"text": "applying", "tokens": ["applying"], "offsets": [84]}}, {"event_type": "MDS", "arguments": [{"text": "train", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["train"], "offsets": [105]}, {"text": "source pairs", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["source", "pairs"], "offsets": [100, 103]}, {"text": "target pairs", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["target", "pairs"], "offsets": [102, 103]}], "trigger": {"text": "deriving", "tokens": ["deriving"], "offsets": [99]}}, {"event_type": "PUR", "arguments": [{"text": "controlled seq - to - seq generation model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["controlled", "seq", "-", "to", "-", "seq", "generation", "model"], "offsets": [107, 108, 109, 110, 111, 112, 113, 114]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [105]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [138]}, {"text": "competitive", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["competitive"], "offsets": [148]}, {"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [161]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [139]}}, {"event_type": "CMP", "arguments": [{"text": "unsupervised cm - lex model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unsupervised", "cm", "-", "lex", "model"], "offsets": [142, 143, 144, 145, 146]}, {"text": "competitive", "nugget_type": "STR", "argument_type": "Result", "tokens": ["competitive"], "offsets": [148]}], "trigger": {"text": "competitive", "tokens": ["competitive"], "offsets": [148]}}, {"event_type": "CMP", "arguments": [{"text": "cm - bart", "nugget_type": "APP", 
"argument_type": "Arg1", "tokens": ["cm", "-", "bart"], "offsets": [158, 159, 160]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [161]}, {"text": "all other models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["all", "other", "models"], "offsets": [162, 163, 164]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [161]}}, {"event_type": "WKS", "arguments": [{"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [23]}, {"text": "relevant verbs", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["relevant", "verbs"], "offsets": [33, 34]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [20]}], "trigger": {"text": "replacing", "tokens": ["replacing"], "offsets": [32]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [121]}, {"text": "assess", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["assess"], "offsets": [122]}, {"text": "automatic evaluation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["automatic", "evaluation"], "offsets": [126, 129]}, {"text": "human evaluation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["human", "evaluation"], "offsets": [128, 129]}], "trigger": {"text": "through", "tokens": ["through"], "offsets": [125]}}, {"event_type": "PUR", "arguments": [{"text": "two methods", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["two", "methods"], "offsets": [68, 69]}], "trigger": {"text": "assess", "tokens": ["assess"], "offsets": [122]}}], "document": ["generating", "metaphors", "is", "a", "difficult", "task", "as", "it", "requires", "understanding", "nuanced", "relationships", "between", "abstract", "concepts", ".", "in", "this", "paper", ",", "we", "aim", "to", "generate", "a", "metaphoric", 
"sentence", "given", "a", "literal", "expression", "by", "replacing", "relevant", "verbs", ".", "guided", "by", "conceptual", "metaphor", "theory", ",", "we", "propose", "to", "control", "the", "generation", "process", "by", "encoding", "conceptual", "mappings", "between", "cognitive", "domains", "to", "generate", "meaningful", "metaphoric", "expressions", ".", "to", "achieve", "this", ",", "we", "develop", "two", "methods", ":", "1", ")", "using", "framenet", "-", "based", "embeddings", "to", "learn", "mappings", "between", "domains", "and", "applying", "them", "at", "the", "lexical", "level", "(", "cm", "-", "lex", ")", ",", "and", "2", ")", "deriving", "source", "/", "target", "pairs", "to", "train", "a", "controlled", "seq", "-", "to", "-", "seq", "generation", "model", "(", "cm", "-", "bart", ")", ".", "we", "assess", "our", "methods", "through", "automatic", "and", "human", "evaluation", "for", "basic", "metaphoricity", "and", "conceptual", "metaphor", "presence", ".", "we", "show", "that", "the", "unsupervised", "cm", "-", "lex", "model", "is", "competitive", "with", "recent", "deep", "learning", "metaphor", "generation", "systems", ",", "and", "cm", "-", "bart", "outperforms", "all", "other", "models", "both", "in", "automatic", "and", "human", "evaluations", "."]}, {"venue": "ACL", "title": "Hubless Nearest Neighbor Search for Bilingual Lexicon Induction", "abstract": "Bilingual Lexicon Induction (BLI) is the task of translating words from corpora in two languages. Recent advances in BLI work by aligning the two word embedding spaces. Following that, a key step is to retrieve the nearest neighbor (NN) in the target space given the source word. However, a phenomenon called hubness often degrades the accuracy of NN. Hubness appears as some data points, called hubs, being extra-ordinarily close to many of the other data points. Reducing hubness is necessary for retrieval tasks. One successful example is Inverted SoFtmax (ISF), recently proposed to improve NN. 
This work proposes a new method, Hubless Nearest Neighbor (HNN), to mitigate hubness. HNN differs from NN by imposing an additional equal preference assumption. Moreover, the HNN formulation explains why ISF works as well as it does. Empirical results demonstrate that HNN outperforms NN, ISF and other state-of-the-art. For reproducibility and follow-ups, we have published all code.", "doc_id": "cc30920d6ea4046a4d4a696d3a6c9cac", "publication_year": 2019, "sentences": ["bilingual lexicon induction ( bli ) is the task of translating words from corpora in two languages .", "recent advances in bli work by aligning the two word embedding spaces .", "following that , a key step is to retrieve the nearest neighbor ( nn ) in the target space given the source word .", "however , a phenomenon called hubness often degrades the accuracy of nn .", "hubness appears as some data points , called hubs , being extra - ordinarily close to many of the other data points .", "reducing hubness is necessary for retrieval tasks .", "one successful example is inverted softmax ( isf ) , recently proposed to improve nn .", "this work proposes a new method , hubless nearest neighbor ( hnn ) , to mitigate hubness .", "hnn differs from nn by imposing an additional equal preference assumption .", "moreover , the hnn formulation explains why isf works as well as it does .", "empirical results demonstrate that hnn outperforms nn , isf and other state - of - the - art .", "for reproducibility and follow - ups , we have published all code ."], "events": [{"event_type": "ITT", "arguments": [{"text": "bilingual lexicon induction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["bilingual", "lexicon", "induction"], "offsets": [0, 1, 2]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [8]}}, {"event_type": "RWS", "arguments": [{"text": "two word embedding spaces", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["two", "word", "embedding", "spaces"], "offsets": 
[26, 27, 28, 29]}], "trigger": {"text": "aligning", "tokens": ["aligning"], "offsets": [24]}}, {"event_type": "RWS", "arguments": [{"text": "nearest neighbor", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["nearest", "neighbor"], "offsets": [41, 42]}, {"text": "target space given the source word", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["target", "space", "given", "the", "source", "word"], "offsets": [48, 49, 50, 51, 52, 53]}], "trigger": {"text": "retrieve", "tokens": ["retrieve"], "offsets": [39]}}, {"event_type": "RWF", "arguments": [{"text": "degrades", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["degrades"], "offsets": [62]}, {"text": "accuracy of nn", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["accuracy", "of", "nearest", "neighbor"], "offsets": [64, 65, 41, 42]}], "trigger": {"text": "degrades", "tokens": ["degrades"], "offsets": [62]}}, {"event_type": "PRP", "arguments": [{"text": "inverted softmax", "nugget_type": "APP", "argument_type": "Content", "tokens": ["inverted", "softmax"], "offsets": [103, 104]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [112]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [110]}}, {"event_type": "PUR", "arguments": [{"text": "nearest neighbor", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["nearest", "neighbor"], "offsets": [41, 42]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [112]}}, {"event_type": "PRP", "arguments": [{"text": "hubless nearest neighbor", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hubless", "nearest", "neighbor"], "offsets": [122, 123, 124]}, {"text": "mitigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["mitigate"], "offsets": [130]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [117]}}, {"event_type": "PUR", "arguments": [{"text": "hubness", 
"nugget_type": "WEA", "argument_type": "Aim", "tokens": ["hubness"], "offsets": [131]}], "trigger": {"text": "mitigate", "tokens": ["mitigate"], "offsets": [130]}}, {"event_type": "MDS", "arguments": [{"text": "additional equal preference assumption", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["additional", "equal", "preference", "assumption"], "offsets": [140, 141, 142, 143]}], "trigger": {"text": "imposing", "tokens": ["imposing"], "offsets": [138]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [165]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [162]}}, {"event_type": "CMP", "arguments": [{"text": "hubless nearest neighbor", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["hubless", "nearest", "neighbor"], "offsets": [122, 123, 124]}, {"text": "nearest neighbor", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["nearest", "neighbor"], "offsets": [41, 42]}, {"text": "inverted softmax", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["inverted", "softmax"], "offsets": [103, 104]}, {"text": "other state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["other", "state", "-", "of", "-", "the", "-", "art"], "offsets": [170, 171, 172, 173, 174, 175, 176, 177]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [165]}}], "document": ["bilingual", "lexicon", "induction", "(", "bli", ")", "is", "the", "task", "of", "translating", "words", "from", "corpora", "in", "two", "languages", ".", "recent", "advances", "in", "bli", "work", "by", "aligning", "the", "two", "word", "embedding", "spaces", ".", "following", "that", ",", "a", "key", "step", "is", "to", "retrieve", "the", "nearest", "neighbor", "(", "nn", ")", "in", "the", "target", "space", "given", "the", "source", "word", ".", "however", ",", "a", "phenomenon", "called", "hubness", 
"often", "degrades", "the", "accuracy", "of", "nn", ".", "hubness", "appears", "as", "some", "data", "points", ",", "called", "hubs", ",", "being", "extra", "-", "ordinarily", "close", "to", "many", "of", "the", "other", "data", "points", ".", "reducing", "hubness", "is", "necessary", "for", "retrieval", "tasks", ".", "one", "successful", "example", "is", "inverted", "softmax", "(", "isf", ")", ",", "recently", "proposed", "to", "improve", "nn", ".", "this", "work", "proposes", "a", "new", "method", ",", "hubless", "nearest", "neighbor", "(", "hnn", ")", ",", "to", "mitigate", "hubness", ".", "hnn", "differs", "from", "nn", "by", "imposing", "an", "additional", "equal", "preference", "assumption", ".", "moreover", ",", "the", "hnn", "formulation", "explains", "why", "isf", "works", "as", "well", "as", "it", "does", ".", "empirical", "results", "demonstrate", "that", "hnn", "outperforms", "nn", ",", "isf", "and", "other", "state", "-", "of", "-", "the", "-", "art", ".", "for", "reproducibility", "and", "follow", "-", "ups", ",", "we", "have", "published", "all", "code", "."]}, {"venue": "ACL", "title": "AMR Parsing with Latent Structural Information", "abstract": "Abstract Meaning Representations (AMRs) capture sentence-level semantics structural representations to broad-coverage natural sentences. We investigate parsing AMR with explicit dependency structures and interpretable latent structures. We generate the latent soft structure without additional annotations, and fuse both dependency and latent structure via an extended graph neural networks. 
The fused structural information helps our experiments results to achieve the best reported results on both AMR 2.0 (77.5% Smatch F1 on LDC2017T10) and AMR 1.0 ((71.8% Smatch F1 on LDC2014T12).", "doc_id": "e35d4b2df7e782d3a169e321646dd28f", "publication_year": 2020, "sentences": ["abstract meaning representations ( amrs ) capture sentence - level semantics structural representations to broad - coverage natural sentences .", "we investigate parsing amr with explicit dependency structures and interpretable latent structures .", "we generate the latent soft structure without additional annotations , and fuse both dependency and latent structure via an extended graph neural networks .", "the fused structural information helps our experiments results to achieve the best reported results on both amr 2 . 0 ( 77 . 5 % smatch f1 on ldc2017t10 ) and amr 1 . 0 ( ( 71 . 8 % smatch f1 on ldc2014t12 ) ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [33]}, {"text": "without additional annotations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "additional", "annotations"], "offsets": [39, 40, 41]}, {"text": "latent soft structure", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["latent", "soft", "structure"], "offsets": [36, 37, 38]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [34]}}, {"event_type": "MDS", "arguments": [{"text": "extended graph neural networks", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["extended", "graph", "neural", "networks"], "offsets": [52, 53, 54, 55]}, {"text": "dependency structure", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["dependency", "structure"], "offsets": [46, 49]}, {"text": "latent structure", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["latent", "structure"], "offsets": [48, 49]}], "trigger": {"text": 
"fuse", "tokens": ["fuse"], "offsets": [44]}}, {"event_type": "FAC", "arguments": [{"text": "on both amr 2 . 0 ( 77 . 5 % smatch f1 on ldc2017t10 ) and amr 1 . 0", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "both", "amr", "2", ".", "0", "and", "amr", "1", ".", "0"], "offsets": [71, 72, 73, 74, 75, 76, 87, 88, 89, 90, 91]}, {"text": "best reported results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["best", "reported", "results"], "offsets": [68, 69, 70]}, {"text": "fused structural information", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["fused", "structural", "information"], "offsets": [58, 59, 60]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [66]}}, {"event_type": "ITT", "arguments": [{"text": "abstract meaning representations", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["amr"], "offsets": [23]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [6]}}, {"event_type": "PUR", "arguments": [{"text": "amr", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["amr"], "offsets": [23]}], "trigger": {"text": "parsing", "tokens": ["parsing"], "offsets": [22]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [20]}, {"text": "explicit dependency structures", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["explicit", "dependency", "structures"], "offsets": [25, 26, 27]}, {"text": "interpretable latent structures", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["interpretable", "latent", "structures"], "offsets": [29, 30, 31]}, {"text": "parsing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["parsing"], "offsets": [22]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [21]}}], "document": ["abstract", "meaning", "representations", "(", "amrs", ")", "capture", "sentence", "-", "level", "semantics", 
"structural", "representations", "to", "broad", "-", "coverage", "natural", "sentences", ".", "we", "investigate", "parsing", "amr", "with", "explicit", "dependency", "structures", "and", "interpretable", "latent", "structures", ".", "we", "generate", "the", "latent", "soft", "structure", "without", "additional", "annotations", ",", "and", "fuse", "both", "dependency", "and", "latent", "structure", "via", "an", "extended", "graph", "neural", "networks", ".", "the", "fused", "structural", "information", "helps", "our", "experiments", "results", "to", "achieve", "the", "best", "reported", "results", "on", "both", "amr", "2", ".", "0", "(", "77", ".", "5", "%", "smatch", "f1", "on", "ldc2017t10", ")", "and", "amr", "1", ".", "0", "(", "(", "71", ".", "8", "%", "smatch", "f1", "on", "ldc2014t12", ")", "."]}, {"venue": "ACL", "title": "An Empirical Study on Adversarial Attack on NMT: Languages and Positions Matter", "abstract": "In this paper, we empirically investigate adversarial attack on NMT from two aspects: languages (the source vs. the target language) and positions (front vs. rear). For autoregressive NMT models that generate target words from left to right, we observe that adversarial attack on the source language is more effective than on the target language, and that attacking front positions of target sentences or positions of source sentences aligned to the front positions of corresponding target sentences is more effective than attacking other positions. We further exploit the attention distribution of the victim model to attack source sentences at positions that have a strong association with front target words. 
Experiment results demonstrate that our attention-based adversarial attack is more effective than adversarial attacks by sampling positions randomly or according to gradients.", "doc_id": "ac6ad8fa7bb20a4df1517cde88496985", "publication_year": 2021, "sentences": ["in this paper , we empirically investigate adversarial attack on nmt from two aspects : languages ( the source vs . the target language ) and positions ( front vs . rear ) .", "for autoregressive nmt models that generate target words from left to right , we observe that adversarial attack on the source language is more effective than on the target language , and that attacking front positions of target sentences or positions of source sentences aligned to the front positions of corresponding target sentences is more effective than attacking other positions .", "we further exploit the attention distribution of the victim model to attack source sentences at positions that have a strong association with front target words .", "experiment results demonstrate that our attention - based adversarial attack is more effective than adversarial attacks by sampling positions randomly or according to gradients ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [4]}, {"text": "from two aspects", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "two", "aspects"], "offsets": [11, 12, 13]}, {"text": "nmt", "nugget_type": "APP", "argument_type": "Target", "tokens": ["nmt"], "offsets": [10]}, {"text": "adversarial attack", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["adversarial", "attack"], "offsets": [7, 8]}], "trigger": {"text": "empirically investigate", "tokens": ["empirically", "investigate"], "offsets": [5, 6]}}, {"event_type": "MDS", "arguments": [{"text": "nmt models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["nmt", "models"], "offsets": [36, 37]}, 
{"text": "target words", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["target", "words"], "offsets": [40, 41]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [39]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [47]}, {"text": "effective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effective"], "offsets": [58]}], "trigger": {"text": "observe", "tokens": ["observe"], "offsets": [48]}}, {"event_type": "FAC", "arguments": [{"text": "on the source language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "source", "language"], "offsets": [52, 53, 54, 55]}, {"text": "more", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["more"], "offsets": [57]}, {"text": "adversarial attack", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["adversarial", "attack"], "offsets": [50, 51]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [58]}}, {"event_type": "FIN", "arguments": [{"text": "effective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effective"], "offsets": [89]}], "trigger": {"text": "observe", "tokens": ["observe"], "offsets": [48]}}, {"event_type": "FAC", "arguments": [{"text": "aligned to the front positions of corresponding target sentences", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["aligned", "to", "the", "front", "positions", "of", "corresponding", "target", "sentences"], "offsets": [78, 79, 80, 81, 82, 83, 84, 85, 86]}, {"text": "more", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["more"], "offsets": [88]}, {"text": "attacking front positions of target sentences", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["attacking", "front", "positions", "of", "target", "sentences"], "offsets": [67, 68, 69, 70, 71, 72]}, {"text": "attacking front positions of source sentences", 
"nugget_type": "TAK", "argument_type": "Subject", "tokens": ["attacking", "front", "positions", "of", "source", "sentences"], "offsets": [67, 68, 74, 75, 76, 77]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [89]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [95]}, {"text": "attention distribution of the victim model", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["attention", "distribution", "of", "the", "victim", "model"], "offsets": [99, 100, 101, 102, 103, 104]}, {"text": "at positions that have a strong association with front target words", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "positions", "that", "have", "a", "strong", "association", "with", "front", "target", "words"], "offsets": [109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]}, {"text": "attack", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["attack"], "offsets": [106]}], "trigger": {"text": "further exploit", "tokens": ["further", "exploit"], "offsets": [96, 97]}}, {"event_type": "PUR", "arguments": [{"text": "source sentences", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["source", "sentences"], "offsets": [107, 108]}], "trigger": {"text": "attack", "tokens": ["attack"], "offsets": [106]}}, {"event_type": "FIN", "arguments": [{"text": "effective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effective"], "offsets": [133]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [123]}}, {"event_type": "FAC", "arguments": [{"text": "attention - based adversarial attack", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["attention", "-", "based", "adversarial", "attack"], "offsets": [126, 127, 128, 129, 130]}, {"text": "more", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["more"], "offsets": [132]}], "trigger": {"text": "effective", "tokens": 
["effective"], "offsets": [133]}}], "document": ["in", "this", "paper", ",", "we", "empirically", "investigate", "adversarial", "attack", "on", "nmt", "from", "two", "aspects", ":", "languages", "(", "the", "source", "vs", ".", "the", "target", "language", ")", "and", "positions", "(", "front", "vs", ".", "rear", ")", ".", "for", "autoregressive", "nmt", "models", "that", "generate", "target", "words", "from", "left", "to", "right", ",", "we", "observe", "that", "adversarial", "attack", "on", "the", "source", "language", "is", "more", "effective", "than", "on", "the", "target", "language", ",", "and", "that", "attacking", "front", "positions", "of", "target", "sentences", "or", "positions", "of", "source", "sentences", "aligned", "to", "the", "front", "positions", "of", "corresponding", "target", "sentences", "is", "more", "effective", "than", "attacking", "other", "positions", ".", "we", "further", "exploit", "the", "attention", "distribution", "of", "the", "victim", "model", "to", "attack", "source", "sentences", "at", "positions", "that", "have", "a", "strong", "association", "with", "front", "target", "words", ".", "experiment", "results", "demonstrate", "that", "our", "attention", "-", "based", "adversarial", "attack", "is", "more", "effective", "than", "adversarial", "attacks", "by", "sampling", "positions", "randomly", "or", "according", "to", "gradients", "."]}, {"venue": "ACL", "title": "Literary Event Detection", "abstract": "In this work we present a new dataset of literary events\u2014events that are depicted as taking place within the imagined space of a novel. While previous work has focused on event detection in the domain of contemporary news, literature poses a number of complications for existing systems, including complex narration, the depiction of a broad array of mental states, and a strong emphasis on figurative language. 
We outline the annotation decisions of this new dataset and compare several models for predicting events; the best performing model, a bidirectional LSTM with BERT token representations, achieves an F1 score of 73.9. We then apply this model to a corpus of novels split across two dimensions\u2014prestige and popularity\u2014and demonstrate that there are statistically significant differences in the distribution of events for prestige.", "doc_id": "ca729612e8ec747bdaf6d938e308e231", "publication_year": 2019, "sentences": ["in this work we present a new dataset of literary events \u2014 events that are depicted as taking place within the imagined space of a novel .", "while previous work has focused on event detection in the domain of contemporary news , literature poses a number of complications for existing systems , including complex narration , the depiction of a broad array of mental states , and a strong emphasis on figurative language .", "we outline the annotation decisions of this new dataset and compare several models for predicting events ; the best performing model , a bidirectional lstm with bert token representations , achieves an f1 score of 73 . 
9 .", "we then apply this model to a corpus of novels split across two dimensions \u2014 prestige and popularity \u2014 and demonstrate that there are statistically significant differences in the distribution of events for prestige ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [3]}, {"text": "dataset of literary events", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset", "of", "literary", "events"], "offsets": [7, 8, 9, 10]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "number of complications", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["number", "of", "complications"], "offsets": [45, 46, 47]}, {"text": "existing systems", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["existing", "systems"], "offsets": [49, 50]}], "trigger": {"text": "poses", "tokens": ["poses"], "offsets": [43]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [74]}, {"text": "annotation decisions of this new dataset", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["annotation", "decisions", "of", "dataset", "of", "literary", "events"], "offsets": [77, 78, 79, 7, 8, 9, 10]}], "trigger": {"text": "outline", "tokens": ["outline"], "offsets": [75]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [74]}, {"text": "models for predicting events", "nugget_type": "APP", "argument_type": "Content", "tokens": ["models", "for", "predicting", "events"], "offsets": [86, 87, 88, 89]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [84]}}, {"event_type": "FAC", "arguments": [{"text": "f1 score", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["f1", "score"], "offsets": [106, 
107]}, {"text": "73 . 9", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["73", ".", "9"], "offsets": [109, 110, 111]}, {"text": "bidirectional lstm with bert token representations", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["bidirectional", "lstm", "with", "bert", "token", "representations"], "offsets": [97, 98, 99, 100, 101, 102]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [104]}}, {"event_type": "MDS", "arguments": [{"text": "bidirectional lstm with bert token representations", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["bidirectional", "lstm", "with", "bert", "token", "representations"], "offsets": [97, 98, 99, 100, 101, 102]}, {"text": "across two dimensions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "two", "dimensions"], "offsets": [124, 125, 126]}, {"text": "corpus of novels split", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["corpus", "of", "novels", "split"], "offsets": [120, 121, 122, 123]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [115]}}, {"event_type": "FAC", "arguments": [{"text": "statistically significant differences", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["statistically", "significant", "differences"], "offsets": [137, 138, 139]}, {"text": "in the distribution of events for prestige", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "distribution", "of", "events", "for", "prestige"], "offsets": [140, 141, 142, 143, 144, 145, 146]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [133]}}], "document": ["in", "this", "work", "we", "present", "a", "new", "dataset", "of", "literary", "events", "\u2014", "events", "that", "are", "depicted", "as", "taking", "place", "within", "the", "imagined", "space", "of", "a", "novel", ".", "while", "previous", "work", "has", "focused", "on", "event", "detection", "in", "the", "domain", 
"of", "contemporary", "news", ",", "literature", "poses", "a", "number", "of", "complications", "for", "existing", "systems", ",", "including", "complex", "narration", ",", "the", "depiction", "of", "a", "broad", "array", "of", "mental", "states", ",", "and", "a", "strong", "emphasis", "on", "figurative", "language", ".", "we", "outline", "the", "annotation", "decisions", "of", "this", "new", "dataset", "and", "compare", "several", "models", "for", "predicting", "events", ";", "the", "best", "performing", "model", ",", "a", "bidirectional", "lstm", "with", "bert", "token", "representations", ",", "achieves", "an", "f1", "score", "of", "73", ".", "9", ".", "we", "then", "apply", "this", "model", "to", "a", "corpus", "of", "novels", "split", "across", "two", "dimensions", "\u2014", "prestige", "and", "popularity", "\u2014", "and", "demonstrate", "that", "there", "are", "statistically", "significant", "differences", "in", "the", "distribution", "of", "events", "for", "prestige", "."]}, {"venue": "ACL", "title": "Learning Deep Transformer Models for Machine Translation", "abstract": "Transformer is the state-of-the-art model in recent machine translation evaluations. Two strands of research are promising to improve models of this kind: the first uses wide networks (a.k.a. Transformer-Big) and has been the de facto standard for development of the Transformer system, and the other uses deeper language representation but faces the difficulty arising from learning deep networks. Here, we continue the line of research on the latter. We claim that a truly deep Transformer model can surpass the Transformer-Big counterpart by 1) proper use of layer normalization and 2) a novel way of passing the combination of previous layers to the next. On WMT\u201916 English-German and NIST OpenMT\u201912 Chinese-English tasks, our deep system (30/25-layer encoder) outperforms the shallow Transformer-Big/Base baseline (6-layer encoder) by 0.4-2.4 BLEU points. 
As another bonus, the deep model is 1.6X smaller in size and 3X faster in training than Transformer-Big.", "doc_id": "665bbb46803fc37b5761efc84e5d5c96", "publication_year": 2019, "sentences": ["transformer is the state - of - the - art model in recent machine translation evaluations .", "two strands of research are promising to improve models of this kind : the first uses wide networks ( a . k . a . transformer - big ) and has been the de facto standard for development of the transformer system , and the other uses deeper language representation but faces the difficulty arising from learning deep networks .", "here , we continue the line of research on the latter .", "we claim that a truly deep transformer model can surpass the transformer - big counterpart by 1 ) proper use of layer normalization and 2 ) a novel way of passing the combination of previous layers to the next .", "on wmt \u2019 16 english - german and nist openmt \u2019 12 chinese - english tasks , our deep system ( 30 / 25 - layer encoder ) outperforms the shallow transformer - big / base baseline ( 6 - layer encoder ) by 0 . 4 - 2 . 4 bleu points .", "as another bonus , the deep model is 1 . 
6x smaller in size and 3x faster in training than transformer - big ."], "events": [{"event_type": "ITT", "arguments": [{"text": "in recent machine translation evaluations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "recent", "machine", "translation", "evaluations"], "offsets": [11, 12, 13, 14, 15]}, {"text": "transformer", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["transformer"], "offsets": [0]}], "trigger": {"text": "state - of - the - art model", "tokens": ["state", "-", "of", "-", "the", "-", "art", "model"], "offsets": [3, 4, 5, 6, 7, 8, 9, 10]}}, {"event_type": "RWF", "arguments": [{"text": "deeper language representation", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["deeper", "language", "representation"], "offsets": [64, 65, 66]}, {"text": "difficulty", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["difficulty"], "offsets": [70]}, {"text": "arising from learning deep networks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["arising", "from", "learning", "deep", "networks"], "offsets": [71, 72, 73, 74, 75]}], "trigger": {"text": "faces", "tokens": ["faces"], "offsets": [68]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [89]}, {"text": "surpass", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["surpass"], "offsets": [98]}], "trigger": {"text": "claim", "tokens": ["claim"], "offsets": [90]}}, {"event_type": "CMP", "arguments": [{"text": "truly deep transformer model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["truly", "deep", "transformer", "model"], "offsets": [93, 94, 95, 96]}, {"text": "surpass", "nugget_type": "STR", "argument_type": "Result", "tokens": ["surpass"], "offsets": [98]}], "trigger": {"text": "surpass", "tokens": ["surpass"], "offsets": [98]}}, {"event_type": "WKS", "arguments": [{"text": "layer normalization", "nugget_type": "TAK", 
"argument_type": "Content", "tokens": ["layer", "normalization"], "offsets": [110, 111]}], "trigger": {"text": "proper use", "tokens": ["proper", "use"], "offsets": [107, 108]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [157]}, {"text": "shallow transformer - big / base baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["shallow", "transformer", "-", "big", "/", "base", "baseline"], "offsets": [159, 160, 161, 162, 163, 164, 165]}, {"text": "deep system", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["deep", "system"], "offsets": [147, 148]}, {"text": "0 . 4 - 2 . 4", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["0", ".", "4", "-", "2", ".", "4"], "offsets": [173, 174, 175, 176, 177, 178, 179]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [157]}}, {"event_type": "CMP", "arguments": [{"text": "deep model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["deep", "model"], "offsets": [188, 189]}, {"text": "smaller", "nugget_type": "STR", "argument_type": "Result", "tokens": ["smaller"], "offsets": [194]}, {"text": "1 . 
6x", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["1", ".", "6x"], "offsets": [191, 192, 193]}, {"text": "size", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["size"], "offsets": [196]}], "trigger": {"text": "smaller", "tokens": ["smaller"], "offsets": [194]}}, {"event_type": "CMP", "arguments": [{"text": "deep model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["deep", "model"], "offsets": [188, 189]}, {"text": "faster", "nugget_type": "STR", "argument_type": "Result", "tokens": ["faster"], "offsets": [199]}, {"text": "transformer - big", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["transformer", "-", "big"], "offsets": [203, 204, 205]}], "trigger": {"text": "faster", "tokens": ["faster"], "offsets": [199]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [89]}, {"text": "novel way", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["novel", "way"], "offsets": [116, 117]}, {"text": "passing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["passing"], "offsets": [119]}], "trigger": {"text": "claim", "tokens": ["claim"], "offsets": [90]}}, {"event_type": "PUR", "arguments": [{"text": "combination of previous layers to the next", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["combination", "of", "previous", "layers", "to", "the", "next"], "offsets": [121, 122, 123, 124, 125, 126, 127]}], "trigger": {"text": "passing", "tokens": ["passing"], "offsets": [119]}}], "document": ["transformer", "is", "the", "state", "-", "of", "-", "the", "-", "art", "model", "in", "recent", "machine", "translation", "evaluations", ".", "two", "strands", "of", "research", "are", "promising", "to", "improve", "models", "of", "this", "kind", ":", "the", "first", "uses", "wide", "networks", "(", "a", ".", "k", ".", "a", ".", "transformer", "-", "big", ")", "and", "has", "been", "the", "de", "facto", "standard", "for", 
"development", "of", "the", "transformer", "system", ",", "and", "the", "other", "uses", "deeper", "language", "representation", "but", "faces", "the", "difficulty", "arising", "from", "learning", "deep", "networks", ".", "here", ",", "we", "continue", "the", "line", "of", "research", "on", "the", "latter", ".", "we", "claim", "that", "a", "truly", "deep", "transformer", "model", "can", "surpass", "the", "transformer", "-", "big", "counterpart", "by", "1", ")", "proper", "use", "of", "layer", "normalization", "and", "2", ")", "a", "novel", "way", "of", "passing", "the", "combination", "of", "previous", "layers", "to", "the", "next", ".", "on", "wmt", "\u2019", "16", "english", "-", "german", "and", "nist", "openmt", "\u2019", "12", "chinese", "-", "english", "tasks", ",", "our", "deep", "system", "(", "30", "/", "25", "-", "layer", "encoder", ")", "outperforms", "the", "shallow", "transformer", "-", "big", "/", "base", "baseline", "(", "6", "-", "layer", "encoder", ")", "by", "0", ".", "4", "-", "2", ".", "4", "bleu", "points", ".", "as", "another", "bonus", ",", "the", "deep", "model", "is", "1", ".", "6x", "smaller", "in", "size", "and", "3x", "faster", "in", "training", "than", "transformer", "-", "big", "."]}, {"venue": "ACL", "title": "Answering Open-Domain Multi-Answer Questions via a Recall-then-Verify Framework", "abstract": "Open-domain questions are likely to be open-ended and ambiguous, leading to multiple valid answers. Existing approaches typically adopt the rerank-then-read framework, where a reader reads top-ranking evidence to predict answers. 
According to our empirical analysis, this framework faces three problems: first, to leverage a large reader under a memory constraint, the reranker should select only a few relevant passages to cover diverse answers, while balancing relevance and diversity is non-trivial; second, the small reading budget prevents the reader from accessing valuable retrieved evidence filtered out by the reranker; third, when using a generative reader to predict answers all at once based on all selected evidence, whether a valid answer will be predicted also pathologically depends on evidence of some other valid answer(s). To address these issues, we propose to answer open-domain multi-answer questions with a recall-then-verify framework, which separates the reasoning process of each answer so that we can make better use of retrieved evidence while also leveraging large models under the same memory constraint. Our framework achieves state-of-the-art results on two multi-answer datasets, and predicts significantly more gold answers than a rerank-then-read system that uses an oracle reranker.", "doc_id": "c6e3b52df86328ab685aa96b60b7c8c4", "publication_year": 2022, "sentences": ["open - domain questions are likely to be open - ended and ambiguous , leading to multiple valid answers .", "existing approaches typically adopt the rerank - then - read framework , where a reader reads top - ranking evidence to predict answers .", "according to our empirical analysis , this framework faces three problems : first , to leverage a large reader under a memory constraint , the reranker should select only a few relevant passages to cover diverse answers , while balancing relevance and diversity is non - trivial ; second , the small reading budget prevents the reader from accessing valuable retrieved evidence filtered out by the reranker ; third , when using a generative reader to predict answers all at once based on all selected evidence , whether a valid answer will be predicted also 
pathologically depends on evidence of some other valid answer ( s ) .", "to address these issues , we propose to answer open - domain multi - answer questions with a recall - then - verify framework , which separates the reasoning process of each answer so that we can make better use of retrieved evidence while also leveraging large models under the same memory constraint .", "our framework achieves state - of - the - art results on two multi - answer datasets , and predicts significantly more gold answers than a rerank - then - read system that uses an oracle reranker ."], "events": [{"event_type": "ITT", "arguments": [{"text": "open - domain questions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["open", "-", "domain", "questions"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "likely", "tokens": ["likely"], "offsets": [5]}}, {"event_type": "RWS", "arguments": [{"text": "rerank - then - read framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["rerank", "-", "then", "-", "read", "framework"], "offsets": [25, 26, 27, 28, 29, 30]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [41]}, {"text": "top - ranking evidence", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["top", "-", "ranking", "evidence"], "offsets": [36, 37, 38, 39]}], "trigger": {"text": "reads", "tokens": ["reads"], "offsets": [35]}}, {"event_type": "PUR", "arguments": [{"text": "answers", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["answers"], "offsets": [42]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [41]}}, {"event_type": "RWS", "arguments": [{"text": "leverage", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["leverage"], "offsets": [59]}, {"text": "few relevant passages", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["few", "relevant", "passages"], "offsets": [74, 75, 76]}, {"text": "diverse answers", 
"nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["diverse", "answers"], "offsets": [79, 80]}], "trigger": {"text": "cover", "tokens": ["cover"], "offsets": [78]}}, {"event_type": "PUR", "arguments": [{"text": "large reader", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["large", "reader"], "offsets": [61, 62]}, {"text": "under a memory constraint", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "a", "memory", "constraint"], "offsets": [63, 64, 65, 66]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [59]}}, {"event_type": "RWF", "arguments": [{"text": "relevance and diversity", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["relevance", "and", "diversity"], "offsets": [84, 85, 86]}], "trigger": {"text": "non - trivial", "tokens": ["non", "-", "trivial"], "offsets": [88, 89, 90]}}, {"event_type": "RWF", "arguments": [{"text": "small reading budget", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["small", "reading", "budget"], "offsets": [95, 96, 97]}], "trigger": {"text": "prevents", "tokens": ["prevents"], "offsets": [98]}}, {"event_type": "RWS", "arguments": [{"text": "reranker", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["reranker"], "offsets": [110]}, {"text": "valuable retrieved evidence", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["valuable", "retrieved", "evidence"], "offsets": [103, 104, 105]}], "trigger": {"text": "filtered", "tokens": ["filtered"], "offsets": [106]}}, {"event_type": "RWS", "arguments": [{"text": "generative reader", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["generative", "reader"], "offsets": [117, 118]}, {"text": "answers", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["answers"], "offsets": [121]}, {"text": "at once", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "once"], "offsets": [123, 124]}, {"text": "based on all 
selected evidence", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "all", "selected", "evidence"], "offsets": [125, 126, 127, 128, 129]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [120]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [157]}, {"text": "answer", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["answer"], "offsets": [160]}, {"text": "recall - then - verify framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["recall", "-", "then", "-", "verify", "framework"], "offsets": [170, 171, 172, 173, 174, 175]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [158]}}, {"event_type": "PUR", "arguments": [{"text": "open - domain multi - answer questions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["open", "-", "domain", "multi", "-", "answer", "questions"], "offsets": [161, 162, 163, 164, 165, 166, 167]}], "trigger": {"text": "answer", "tokens": ["answer"], "offsets": [160]}}, {"event_type": "MDS", "arguments": [{"text": "make better use of", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["make", "better", "use", "of"], "offsets": [189, 190, 191, 192]}, {"text": "reasoning process of each answer", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["reasoning", "process", "of", "each", "answer"], "offsets": [180, 181, 182, 183, 184]}, {"text": "leveraging", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["leveraging"], "offsets": [197]}], "trigger": {"text": "separates", "tokens": ["separates"], "offsets": [178]}}, {"event_type": "PUR", "arguments": [{"text": "retrieved evidence", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["retrieved", "evidence"], "offsets": [193, 194]}], "trigger": {"text": "make better use of", "tokens": ["make", "better", "use", "of"], "offsets": [189, 190, 191, 192]}}, {"event_type": 
"PUR", "arguments": [{"text": "large models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["large", "models"], "offsets": [198, 199]}, {"text": "under the same memory constraint", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "the", "same", "memory", "constraint"], "offsets": [200, 201, 202, 203, 204]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [197]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [209, 210, 211, 212, 213, 214, 215, 216]}, {"text": "recall - then - verify framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["recall", "-", "then", "-", "verify", "framework"], "offsets": [170, 171, 172, 173, 174, 175]}, {"text": "two multi - answer datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "multi", "-", "answer", "datasets"], "offsets": [218, 219, 220, 221, 222]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [208]}}, {"event_type": "CMP", "arguments": [{"text": "more gold answers", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "gold", "answers"], "offsets": [227, 228, 229]}, {"text": "rerank - then - read system", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["rerank", "-", "then", "-", "read", "system"], "offsets": [232, 233, 234, 235, 236, 237]}, {"text": "recall - then - verify framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["recall", "-", "then", "-", "verify", "framework"], "offsets": [170, 171, 172, 173, 174, 175]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [226]}], "trigger": {"text": "predicts", "tokens": ["predicts"], "offsets": [225]}}], "document": ["open", "-", "domain", "questions", "are", "likely", "to", "be", "open", "-", 
"ended", "and", "ambiguous", ",", "leading", "to", "multiple", "valid", "answers", ".", "existing", "approaches", "typically", "adopt", "the", "rerank", "-", "then", "-", "read", "framework", ",", "where", "a", "reader", "reads", "top", "-", "ranking", "evidence", "to", "predict", "answers", ".", "according", "to", "our", "empirical", "analysis", ",", "this", "framework", "faces", "three", "problems", ":", "first", ",", "to", "leverage", "a", "large", "reader", "under", "a", "memory", "constraint", ",", "the", "reranker", "should", "select", "only", "a", "few", "relevant", "passages", "to", "cover", "diverse", "answers", ",", "while", "balancing", "relevance", "and", "diversity", "is", "non", "-", "trivial", ";", "second", ",", "the", "small", "reading", "budget", "prevents", "the", "reader", "from", "accessing", "valuable", "retrieved", "evidence", "filtered", "out", "by", "the", "reranker", ";", "third", ",", "when", "using", "a", "generative", "reader", "to", "predict", "answers", "all", "at", "once", "based", "on", "all", "selected", "evidence", ",", "whether", "a", "valid", "answer", "will", "be", "predicted", "also", "pathologically", "depends", "on", "evidence", "of", "some", "other", "valid", "answer", "(", "s", ")", ".", "to", "address", "these", "issues", ",", "we", "propose", "to", "answer", "open", "-", "domain", "multi", "-", "answer", "questions", "with", "a", "recall", "-", "then", "-", "verify", "framework", ",", "which", "separates", "the", "reasoning", "process", "of", "each", "answer", "so", "that", "we", "can", "make", "better", "use", "of", "retrieved", "evidence", "while", "also", "leveraging", "large", "models", "under", "the", "same", "memory", "constraint", ".", "our", "framework", "achieves", "state", "-", "of", "-", "the", "-", "art", "results", "on", "two", "multi", "-", "answer", "datasets", ",", "and", "predicts", "significantly", "more", "gold", "answers", "than", "a", "rerank", "-", "then", "-", "read", "system", "that", "uses", 
"an", "oracle", "reranker", "."]}, {"venue": "ACL", "title": "Gender-preserving Debiasing for Pre-trained Word Embeddings", "abstract": "Word embeddings learnt from massive text collections have demonstrated significant levels of discriminative biases such as gender, racial or ethnic biases, which in turn bias the down-stream NLP applications that use those word embeddings. Taking gender-bias as a working example, we propose a debiasing method that preserves non-discriminative gender-related information, while removing stereotypical discriminative gender biases from pre-trained word embeddings. Specifically, we consider four types of information: feminine, masculine, gender-neutral and stereotypical, which represent the relationship between gender vs. bias, and propose a debiasing method that (a) preserves the gender-related information in feminine and masculine words, (b) preserves the neutrality in gender-neutral words, and (c) removes the biases from stereotypical words. Experimental results on several previously proposed benchmark datasets show that our proposed method can debias pre-trained word embeddings better than existing SoTA methods proposed for debiasing word embeddings while preserving gender-related but non-discriminative information.", "doc_id": "e0f13560519a4416cdf6b551a3f885d7", "publication_year": 2019, "sentences": ["word embeddings learnt from massive text collections have demonstrated significant levels of discriminative biases such as gender , racial or ethnic biases , which in turn bias the down - stream nlp applications that use those word embeddings .", "taking gender - bias as a working example , we propose a debiasing method that preserves non - discriminative gender - related information , while removing stereotypical discriminative gender biases from pre - trained word embeddings .", "specifically , we consider four types of information : feminine , masculine , gender - neutral and stereotypical , which represent the relationship 
between gender vs . bias , and propose a debiasing method that ( a ) preserves the gender - related information in feminine and masculine words , ( b ) preserves the neutrality in gender - neutral words , and ( c ) removes the biases from stereotypical words .", "experimental results on several previously proposed benchmark datasets show that our proposed method can debias pre - trained word embeddings better than existing sota methods proposed for debiasing word embeddings while preserving gender - related but non - discriminative information ."], "events": [{"event_type": "ITT", "arguments": [{"text": "word embeddings", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["word", "embeddings"], "offsets": [0, 1]}], "trigger": {"text": "demonstrated", "tokens": ["demonstrated"], "offsets": [8]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [48]}, {"text": "debiasing method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["debiasing", "method"], "offsets": [51, 52]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [49]}}, {"event_type": "MDS", "arguments": [{"text": "non - discriminative gender - related information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["non", "-", "discriminative", "gender", "-", "related", "information"], "offsets": [55, 56, 57, 58, 59, 60, 61]}], "trigger": {"text": "preserves", "tokens": ["preserves"], "offsets": [54]}}, {"event_type": "MDS", "arguments": [{"text": "stereotypical discriminative gender biases", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["stereotypical", "discriminative", "gender", "biases"], "offsets": [65, 66, 67, 68]}, {"text": "pre - trained word embeddings", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["pre", "-", "trained", "word", "embeddings"], "offsets": [70, 71, 72, 73, 74]}], "trigger": {"text": "removing", "tokens": 
["removing"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [78]}, {"text": "feminine", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["feminine"], "offsets": [85]}, {"text": "masculine", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["masculine"], "offsets": [87]}, {"text": "gender - neutral", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["gender", "-", "neutral"], "offsets": [89, 90, 91]}, {"text": "stereotypical", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["stereotypical"], "offsets": [93]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [79]}}, {"event_type": "PRP", "arguments": [{"text": "debiasing method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["debiasing", "method"], "offsets": [108, 109]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [78]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [106]}}, {"event_type": "MDS", "arguments": [{"text": "gender - related information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["gender", "-", "related", "information"], "offsets": [116, 117, 118, 119]}, {"text": "in feminine and masculine words", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "feminine", "and", "masculine", "words"], "offsets": [120, 121, 122, 123, 124]}], "trigger": {"text": "preserves", "tokens": ["preserves"], "offsets": [114]}}, {"event_type": "WKS", "arguments": [{"text": "neutrality", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["neutrality"], "offsets": [131]}, {"text": "in gender - neutral words", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "gender", "-", "neutral", "words"], "offsets": [132, 133, 134, 135, 136]}], "trigger": {"text": "preserves", "tokens": ["preserves"], "offsets": 
[129]}}, {"event_type": "MDS", "arguments": [{"text": "biases", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["biases"], "offsets": [144]}, {"text": "stereotypical words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["stereotypical", "words"], "offsets": [146, 147]}], "trigger": {"text": "removes", "tokens": ["removes"], "offsets": [142]}}, {"event_type": "FIN", "arguments": [{"text": "debias", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["debias"], "offsets": [163]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [157]}}, {"event_type": "CMP", "arguments": [{"text": "debiasing method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["debiasing", "method"], "offsets": [108, 109]}, {"text": "pre - trained word embeddings", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["pre", "-", "trained", "word", "embeddings"], "offsets": [164, 165, 166, 167, 168]}, {"text": "existing sota methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "sota", "methods"], "offsets": [171, 172, 173]}, {"text": "proposed for debiasing word embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["proposed", "for", "debiasing", "word", "embeddings"], "offsets": [174, 175, 176, 177, 178]}, {"text": "several previously proposed benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["several", "previously", "proposed", "benchmark", "datasets"], "offsets": [152, 153, 154, 155, 156]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [169]}], "trigger": {"text": "debias", "tokens": ["debias"], "offsets": [163]}}, {"event_type": "FIN", "arguments": [{"text": "preserving", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["preserving"], "offsets": [180]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [157]}}, {"event_type": "FAC", "arguments": [{"text": 
"debiasing method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["debiasing", "method"], "offsets": [108, 109]}, {"text": "several previously proposed benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["several", "previously", "proposed", "benchmark", "datasets"], "offsets": [152, 153, 154, 155, 156]}, {"text": "gender - related information", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["gender", "-", "related", "information"], "offsets": [181, 182, 183, 188]}, {"text": "non - discriminative information", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["non", "-", "discriminative", "information"], "offsets": [185, 186, 187, 188]}], "trigger": {"text": "preserving", "tokens": ["preserving"], "offsets": [180]}}], "document": ["word", "embeddings", "learnt", "from", "massive", "text", "collections", "have", "demonstrated", "significant", "levels", "of", "discriminative", "biases", "such", "as", "gender", ",", "racial", "or", "ethnic", "biases", ",", "which", "in", "turn", "bias", "the", "down", "-", "stream", "nlp", "applications", "that", "use", "those", "word", "embeddings", ".", "taking", "gender", "-", "bias", "as", "a", "working", "example", ",", "we", "propose", "a", "debiasing", "method", "that", "preserves", "non", "-", "discriminative", "gender", "-", "related", "information", ",", "while", "removing", "stereotypical", "discriminative", "gender", "biases", "from", "pre", "-", "trained", "word", "embeddings", ".", "specifically", ",", "we", "consider", "four", "types", "of", "information", ":", "feminine", ",", "masculine", ",", "gender", "-", "neutral", "and", "stereotypical", ",", "which", "represent", "the", "relationship", "between", "gender", "vs", ".", "bias", ",", "and", "propose", "a", "debiasing", "method", "that", "(", "a", ")", "preserves", "the", "gender", "-", "related", "information", "in", "feminine", "and", "masculine", "words", ",", "(", "b", ")", "preserves", "the", 
"neutrality", "in", "gender", "-", "neutral", "words", ",", "and", "(", "c", ")", "removes", "the", "biases", "from", "stereotypical", "words", ".", "experimental", "results", "on", "several", "previously", "proposed", "benchmark", "datasets", "show", "that", "our", "proposed", "method", "can", "debias", "pre", "-", "trained", "word", "embeddings", "better", "than", "existing", "sota", "methods", "proposed", "for", "debiasing", "word", "embeddings", "while", "preserving", "gender", "-", "related", "but", "non", "-", "discriminative", "information", "."]}, {"venue": "ACL", "title": "Prosodic segmentation for parsing spoken dialogue", "abstract": "Parsing spoken dialogue poses unique difficulties, including disfluencies and unmarked boundaries between sentence-like units. Previous work has shown that prosody can help with parsing disfluent speech (Tran et al. 2018), but has assumed that the input to the parser is already segmented into sentence-like units (SUs), which isn\u2019t true in existing speech applications. We investigate how prosody affects a parser that receives an entire dialogue turn as input (a turn-based model), instead of gold standard pre-segmented SUs (an SU-based model). In experiments on the English Switchboard corpus, we find that when using transcripts alone, the turn-based model has trouble segmenting SUs, leading to worse parse performance than the SU-based model. However, prosody can effectively replace gold standard SU boundaries: with prosody, the turn-based model performs as well as the SU-based model (91.38 vs. 91.06 F1 score, respectively), despite performing two tasks (SU segmentation and parsing) rather than one (parsing alone). 
Analysis shows that pitch and intensity features are the most important for this corpus, since they allow the model to correctly distinguish an SU boundary from a speech disfluency \u2013 a distinction that the model otherwise struggles to make.", "doc_id": "de116cc2d2ed149e7880fe02e9d0335b", "publication_year": 2021, "sentences": ["parsing spoken dialogue poses unique difficulties , including disfluencies and unmarked boundaries between sentence - like units .", "previous work has shown that prosody can help with parsing disfluent speech ( tran et al . 2018 ) , but has assumed that the input to the parser is already segmented into sentence - like units ( sus ) , which isn \u2019 t true in existing speech applications .", "we investigate how prosody affects a parser that receives an entire dialogue turn as input ( a turn - based model ) , instead of gold standard pre - segmented sus ( an su - based model ) .", "in experiments on the english switchboard corpus , we find that when using transcripts alone , the turn - based model has trouble segmenting sus , leading to worse parse performance than the su - based model .", "however , prosody can effectively replace gold standard su boundaries : with prosody , the turn - based model performs as well as the su - based model ( 91 . 38 vs . 91 . 
06 f1 score , respectively ) , despite performing two tasks ( su segmentation and parsing ) rather than one ( parsing alone ) .", "analysis shows that pitch and intensity features are the most important for this corpus , since they allow the model to correctly distinguish an su boundary from a speech disfluency \u2013 a distinction that the model otherwise struggles to make ."], "events": [{"event_type": "RWF", "arguments": [{"text": "parsing spoken dialogue", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["parsing", "spoken", "dialogue"], "offsets": [0, 1, 2]}, {"text": "difficulties", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["difficulties"], "offsets": [5]}, {"text": "disfluencies", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["disfluencies"], "offsets": [8]}, {"text": "unmarked boundaries between sentence - like units", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unmarked", "boundaries", "between", "sentence", "-", "like", "units"], "offsets": [10, 11, 12, 13, 14, 15, 16]}], "trigger": {"text": "poses", "tokens": ["poses"], "offsets": [3]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [69]}, {"text": "prosody", "nugget_type": "APP", "argument_type": "Content", "tokens": ["prosody"], "offsets": [72]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [70]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [116]}, {"text": "has", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["has"], "offsets": [129]}, {"text": "leading", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["leading"], "offsets": [134]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [117]}}, {"event_type": "FAC", "arguments": [{"text": "when using transcripts alone", "nugget_type": "LIM", "argument_type": 
"Condition", "tokens": ["when", "using", "transcripts", "alone"], "offsets": [119, 120, 121, 122]}, {"text": "turn - based model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["turn", "-", "based", "model"], "offsets": [125, 126, 127, 128]}, {"text": "trouble", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["trouble"], "offsets": [130]}, {"text": "segmenting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["segmenting"], "offsets": [131]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [129]}}, {"event_type": "PUR", "arguments": [{"text": "sentence - like units", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["sentence", "-", "like", "units"], "offsets": [51, 52, 53, 54]}], "trigger": {"text": "segmenting", "tokens": ["segmenting"], "offsets": [131]}}, {"event_type": "CMP", "arguments": [{"text": "when using transcripts alone", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "using", "transcripts", "alone"], "offsets": [119, 120, 121, 122]}, {"text": "turn - based model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["turn", "-", "based", "model"], "offsets": [125, 126, 127, 128]}, {"text": "worse parse performance", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["worse", "parse", "performance"], "offsets": [136, 137, 138]}, {"text": "su - based model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["su", "-", "based", "model"], "offsets": [141, 142, 143, 144]}], "trigger": {"text": "leading", "tokens": ["leading"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "prosody", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["prosody"], "offsets": [148]}, {"text": "gold standard su boundaries", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["gold", "standard", "su", "boundaries"], "offsets": [152, 153, 154, 155]}], "trigger": {"text": "effectively replace", "tokens": ["effectively", "replace"], "offsets": [150, 151]}}, 
{"event_type": "CMP", "arguments": [{"text": "with prosody", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "prosody"], "offsets": [157, 158]}, {"text": "turn - based model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["turn", "-", "based", "model"], "offsets": [161, 162, 163, 164]}, {"text": "su - based model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["su", "-", "based", "model"], "offsets": [170, 171, 172, 173]}, {"text": "as well as", "nugget_type": "STR", "argument_type": "Result", "tokens": ["as", "well", "as"], "offsets": [166, 167, 168]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [165]}}, {"event_type": "FIN", "arguments": [{"text": "most important", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["most", "important"], "offsets": [216, 217]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [208]}}, {"event_type": "FAC", "arguments": [{"text": "pitch and intensity features", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pitch", "and", "intensity", "features"], "offsets": [210, 211, 212, 213]}, {"text": "english switchboard corpus", "nugget_type": "DST", "argument_type": "Object", "tokens": ["english", "switchboard", "corpus"], "offsets": [112, 113, 114]}, {"text": "since they allow the model to correctly distinguish an su boundary from a speech disfluency \u2013 a distinction", "nugget_type": "LIM", "argument_type": "Reason", "tokens": ["since", "they", "allow", "the", "model", "to", "correctly", "distinguish", "an", "su", "boundary", "from", "a", "speech", "disfluency", "\u2013", "a", "distinction"], "offsets": [222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239]}], "trigger": {"text": "most important", "tokens": ["most", "important"], "offsets": [216, 217]}}, {"event_type": "RWF", "arguments": [{"text": "in existing speech applications", "nugget_type": "LIM", "argument_type": "Condition", 
"tokens": ["in", "existing", "speech", "applications"], "offsets": [64, 65, 66, 67]}, {"text": "input to the parser", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["input", "to", "the", "parser"], "offsets": [43, 44, 45, 46]}, {"text": "sentence - like units", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sentence", "-", "like", "units"], "offsets": [51, 52, 53, 54]}, {"text": "isn \u2019 t true", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["isn", "\u2019", "t", "true"], "offsets": [60, 61, 62, 63]}], "trigger": {"text": "segmented", "tokens": ["segmented"], "offsets": [49]}}], "document": ["parsing", "spoken", "dialogue", "poses", "unique", "difficulties", ",", "including", "disfluencies", "and", "unmarked", "boundaries", "between", "sentence", "-", "like", "units", ".", "previous", "work", "has", "shown", "that", "prosody", "can", "help", "with", "parsing", "disfluent", "speech", "(", "tran", "et", "al", ".", "2018", ")", ",", "but", "has", "assumed", "that", "the", "input", "to", "the", "parser", "is", "already", "segmented", "into", "sentence", "-", "like", "units", "(", "sus", ")", ",", "which", "isn", "\u2019", "t", "true", "in", "existing", "speech", "applications", ".", "we", "investigate", "how", "prosody", "affects", "a", "parser", "that", "receives", "an", "entire", "dialogue", "turn", "as", "input", "(", "a", "turn", "-", "based", "model", ")", ",", "instead", "of", "gold", "standard", "pre", "-", "segmented", "sus", "(", "an", "su", "-", "based", "model", ")", ".", "in", "experiments", "on", "the", "english", "switchboard", "corpus", ",", "we", "find", "that", "when", "using", "transcripts", "alone", ",", "the", "turn", "-", "based", "model", "has", "trouble", "segmenting", "sus", ",", "leading", "to", "worse", "parse", "performance", "than", "the", "su", "-", "based", "model", ".", "however", ",", "prosody", "can", "effectively", "replace", "gold", "standard", "su", "boundaries", ":", "with", "prosody", 
",", "the", "turn", "-", "based", "model", "performs", "as", "well", "as", "the", "su", "-", "based", "model", "(", "91", ".", "38", "vs", ".", "91", ".", "06", "f1", "score", ",", "respectively", ")", ",", "despite", "performing", "two", "tasks", "(", "su", "segmentation", "and", "parsing", ")", "rather", "than", "one", "(", "parsing", "alone", ")", ".", "analysis", "shows", "that", "pitch", "and", "intensity", "features", "are", "the", "most", "important", "for", "this", "corpus", ",", "since", "they", "allow", "the", "model", "to", "correctly", "distinguish", "an", "su", "boundary", "from", "a", "speech", "disfluency", "\u2013", "a", "distinction", "that", "the", "model", "otherwise", "struggles", "to", "make", "."]}, {"venue": "ACL", "title": "Inducing Positive Perspectives with Text Reframing", "abstract": "Sentiment transfer is one popular example of a text style transfer task, where the goal is to reverse the sentiment polarity of a text. With a sentiment reversal comes also a reversal in meaning. We introduce a different but related task called positive reframing in which we neutralize a negative point of view and generate a more positive perspective for the author without contradicting the original meaning. Our insistence on meaning preservation makes positive reframing a challenging and semantically rich task. To facilitate rapid progress, we introduce a large-scale benchmark, Positive Psychology Frames, with 8,349 sentence pairs and 12,755 structured annotations to explain positive reframing in terms of six theoretically-motivated reframing strategies. 
Then we evaluate a set of state-of-the-art text style transfer models, and conclude by discussing key challenges and directions for future work.", "doc_id": "1957099b786a71c3555dde4395fcc230", "publication_year": 2022, "sentences": ["sentiment transfer is one popular example of a text style transfer task , where the goal is to reverse the sentiment polarity of a text .", "with a sentiment reversal comes also a reversal in meaning .", "we introduce a different but related task called positive reframing in which we neutralize a negative point of view and generate a more positive perspective for the author without contradicting the original meaning .", "our insistence on meaning preservation makes positive reframing a challenging and semantically rich task .", "to facilitate rapid progress , we introduce a large - scale benchmark , positive psychology frames , with 8 , 349 sentence pairs and 12 , 755 structured annotations to explain positive reframing in terms of six theoretically - motivated reframing strategies .", "then we evaluate a set of state - of - the - art text style transfer models , and conclude by discussing key challenges and directions for future work ."], "events": [{"event_type": "ITT", "arguments": [{"text": "sentiment transfer", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sentiment", "transfer"], "offsets": [0, 1]}], "trigger": {"text": "example", "tokens": ["example"], "offsets": [5]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [37]}, {"text": "positive reframing", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["positive", "reframing"], "offsets": [45, 46]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [38]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [49]}, {"text": "negative point of view", "nugget_type": "FEA", 
"argument_type": "Content", "tokens": ["negative", "point", "of", "view"], "offsets": [52, 53, 54, 55]}], "trigger": {"text": "neutralize", "tokens": ["neutralize"], "offsets": [50]}}, {"event_type": "WKS", "arguments": [{"text": "positive perspective", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["positive", "perspective"], "offsets": [60, 61]}, {"text": "without contradicting the original meaning", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "contradicting", "the", "original", "meaning"], "offsets": [65, 66, 67, 68, 69]}, {"text": "author", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["author"], "offsets": [64]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [57]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [91]}, {"text": "positive psychology frames", "nugget_type": "APP", "argument_type": "Content", "tokens": ["positive", "psychology", "frames"], "offsets": [99, 100, 101]}, {"text": "explain", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["explain"], "offsets": [116]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [92]}}, {"event_type": "PUR", "arguments": [{"text": "positive reframing", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["positive", "reframing"], "offsets": [117, 118]}, {"text": "in terms of six theoretically - motivated reframing strategies", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "terms", "of", "six", "theoretically", "-", "motivated", "reframing", "strategies"], "offsets": [119, 120, 121, 122, 123, 124, 125, 126, 127]}], "trigger": {"text": "explain", "tokens": ["explain"], "offsets": [116]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [130]}, {"text": "state - of - the - art text style transfer models", 
"nugget_type": "APP", "argument_type": "Content", "tokens": ["state", "-", "of", "-", "the", "-", "art", "text", "style", "transfer", "models"], "offsets": [135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [131]}}, {"event_type": "WKS", "arguments": [{"text": "key challenges and directions", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["key", "challenges", "and", "directions"], "offsets": [151, 152, 153, 154]}, {"text": "future work", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["future", "work"], "offsets": [156, 157]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [130]}], "trigger": {"text": "discussing", "tokens": ["discussing"], "offsets": [150]}}], "document": ["sentiment", "transfer", "is", "one", "popular", "example", "of", "a", "text", "style", "transfer", "task", ",", "where", "the", "goal", "is", "to", "reverse", "the", "sentiment", "polarity", "of", "a", "text", ".", "with", "a", "sentiment", "reversal", "comes", "also", "a", "reversal", "in", "meaning", ".", "we", "introduce", "a", "different", "but", "related", "task", "called", "positive", "reframing", "in", "which", "we", "neutralize", "a", "negative", "point", "of", "view", "and", "generate", "a", "more", "positive", "perspective", "for", "the", "author", "without", "contradicting", "the", "original", "meaning", ".", "our", "insistence", "on", "meaning", "preservation", "makes", "positive", "reframing", "a", "challenging", "and", "semantically", "rich", "task", ".", "to", "facilitate", "rapid", "progress", ",", "we", "introduce", "a", "large", "-", "scale", "benchmark", ",", "positive", "psychology", "frames", ",", "with", "8", ",", "349", "sentence", "pairs", "and", "12", ",", "755", "structured", "annotations", "to", "explain", "positive", "reframing", "in", "terms", "of", "six", "theoretically", "-", "motivated", "reframing", "strategies", 
".", "then", "we", "evaluate", "a", "set", "of", "state", "-", "of", "-", "the", "-", "art", "text", "style", "transfer", "models", ",", "and", "conclude", "by", "discussing", "key", "challenges", "and", "directions", "for", "future", "work", "."]}, {"venue": "ACL", "title": "A Closer Look at How Fine-tuning Changes BERT", "abstract": "Given the prevalence of pre-trained contextualized representations in today\u2019s NLP, there have been many efforts to understand what information they contain, and why they seem to be universally successful. The most common approach to use these representations involves fine-tuning them for an end task. Yet, how fine-tuning changes the underlying embedding space is less studied. In this work, we study the English BERT family and use two probing techniques to analyze how fine-tuning changes the space. We hypothesize that fine-tuning affects classification performance by increasing the distances between examples associated with different labels. We confirm this hypothesis with carefully designed experiments on five different NLP tasks. Via these experiments, we also discover an exception to the prevailing wisdom that \u201cfine-tuning always improves performance\u201d. 
Finally, by comparing the representations before and after fine-tuning, we discover that fine-tuning does not introduce arbitrary changes to representations; instead, it adjusts the representations to downstream tasks while largely preserving the original spatial structure of the data points.", "doc_id": "1abc2b8815f3288702b641e7c9b4f03e", "publication_year": 2022, "sentences": ["given the prevalence of pre - trained contextualized representations in today \u2019 s nlp , there have been many efforts to understand what information they contain , and why they seem to be universally successful .", "the most common approach to use these representations involves fine - tuning them for an end task .", "yet , how fine - tuning changes the underlying embedding space is less studied .", "in this work , we study the english bert family and use two probing techniques to analyze how fine - tuning changes the space .", "we hypothesize that fine - tuning affects classification performance by increasing the distances between examples associated with different labels .", "we confirm this hypothesis with carefully designed experiments on five different nlp tasks .", "via these experiments , we also discover an exception to the prevailing wisdom that \u201c fine - tuning always improves performance \u201d .", "finally , by comparing the representations before and after fine - tuning , we discover that fine - tuning does not introduce arbitrary changes to representations ; instead , it adjusts the representations to downstream tasks while largely preserving the original spatial structure of the data points ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained contextualized representations", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["pre", "-", "trained", "contextualized", "representations"], "offsets": [4, 5, 6, 7, 8]}], "trigger": {"text": "given", "tokens": ["given"], "offsets": [0]}}, {"event_type": "WKS", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [73]}, {"text": "english bert family", "nugget_type": "APP", "argument_type": "Content", "tokens": ["english", "bert", "family"], "offsets": [76, 77, 78]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [74]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [73]}, {"text": "two probing techniques", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "probing", "techniques"], "offsets": [81, 82, 83]}, {"text": "analyze", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["analyze"], "offsets": [85]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [80]}}, {"event_type": "PUR", "arguments": [{"text": "fine - tuning", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["fine", "-", "tuning"], "offsets": [87, 88, 89]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [85]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [114]}, {"text": "this hypothesis", "nugget_type": "APP", "argument_type": "Content", "tokens": ["this", "hypothesis"], "offsets": [116, 117]}, {"text": "with carefully designed experiments on five different nlp tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "carefully", "designed", "experiments", "on", "five", "different", "nlp", "tasks"], "offsets": [118, 119, 120, 121, 122, 123, 124, 125, 126]}], "trigger": {"text": "confirm", "tokens": ["confirm"], "offsets": [115]}}, {"event_type": "WKS", "arguments": [{"text": "representations before and after fine - tuning", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["representations", "before", "and", "after", "fine", "-", "tuning"], "offsets": [156, 157, 158, 159, 160, 161, 162]}], "trigger": {"text": "comparing", "tokens": ["comparing"], 
"offsets": [154]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [164]}, {"text": "preserving", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["preserving"], "offsets": [189]}, {"text": "not introduce", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["not", "introduce"], "offsets": [171, 172]}, {"text": "adjusts", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["adjusts"], "offsets": [181]}], "trigger": {"text": "discover", "tokens": ["discover"], "offsets": [165]}}, {"event_type": "FAC", "arguments": [{"text": "fine - tuning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["fine", "-", "tuning"], "offsets": [167, 168, 169]}, {"text": "representations to downstream tasks", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["representations", "to", "downstream", "tasks"], "offsets": [183, 184, 185, 186]}], "trigger": {"text": "adjusts", "tokens": ["adjusts"], "offsets": [181]}}, {"event_type": "FAC", "arguments": [{"text": "fine - tuning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["fine", "-", "tuning"], "offsets": [167, 168, 169]}, {"text": "original spatial structure of the data points", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["original", "spatial", "structure", "of", "the", "data", "points"], "offsets": [191, 192, 193, 194, 195, 196, 197]}], "trigger": {"text": "preserving", "tokens": ["preserving"], "offsets": [189]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [132]}, {"text": "always improves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["always", "improves"], "offsets": [146, 147]}], "trigger": {"text": "discover", "tokens": ["discover"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "fine - tuning", "nugget_type": "APP", "argument_type": "Subject", 
"tokens": ["fine", "-", "tuning"], "offsets": [143, 144, 145]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance"], "offsets": [148]}], "trigger": {"text": "always improves", "tokens": ["always", "improves"], "offsets": [146, 147]}}, {"event_type": "FAC", "arguments": [{"text": "fine - tuning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["fine", "-", "tuning"], "offsets": [167, 168, 169]}, {"text": "arbitrary changes", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["arbitrary", "changes"], "offsets": [173, 174]}], "trigger": {"text": "not introduce", "tokens": ["not", "introduce"], "offsets": [171, 172]}}, {"event_type": "FAC", "arguments": [{"text": "fine - tuning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["fine", "-", "tuning"], "offsets": [167, 168, 169]}, {"text": "representations", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["representations"], "offsets": [183]}, {"text": "while largely preserving the original spatial structure of the data points", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "largely", "preserving", "the", "original", "spatial", "structure", "of", "the", "data", "points"], "offsets": [187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197]}], "trigger": {"text": "adjusts", "tokens": ["adjusts"], "offsets": [181]}}], "document": ["given", "the", "prevalence", "of", "pre", "-", "trained", "contextualized", "representations", "in", "today", "\u2019", "s", "nlp", ",", "there", "have", "been", "many", "efforts", "to", "understand", "what", "information", "they", "contain", ",", "and", "why", "they", "seem", "to", "be", "universally", "successful", ".", "the", "most", "common", "approach", "to", "use", "these", "representations", "involves", "fine", "-", "tuning", "them", "for", "an", "end", "task", ".", "yet", ",", "how", "fine", "-", "tuning", "changes", "the", "underlying", "embedding", "space", "is", 
"less", "studied", ".", "in", "this", "work", ",", "we", "study", "the", "english", "bert", "family", "and", "use", "two", "probing", "techniques", "to", "analyze", "how", "fine", "-", "tuning", "changes", "the", "space", ".", "we", "hypothesize", "that", "fine", "-", "tuning", "affects", "classification", "performance", "by", "increasing", "the", "distances", "between", "examples", "associated", "with", "different", "labels", ".", "we", "confirm", "this", "hypothesis", "with", "carefully", "designed", "experiments", "on", "five", "different", "nlp", "tasks", ".", "via", "these", "experiments", ",", "we", "also", "discover", "an", "exception", "to", "the", "prevailing", "wisdom", "that", "\u201c", "fine", "-", "tuning", "always", "improves", "performance", "\u201d", ".", "finally", ",", "by", "comparing", "the", "representations", "before", "and", "after", "fine", "-", "tuning", ",", "we", "discover", "that", "fine", "-", "tuning", "does", "not", "introduce", "arbitrary", "changes", "to", "representations", ";", "instead", ",", "it", "adjusts", "the", "representations", "to", "downstream", "tasks", "while", "largely", "preserving", "the", "original", "spatial", "structure", "of", "the", "data", "points", "."]}, {"venue": "ACL", "title": "Understanding Game-Playing Agents with Natural Language Annotations", "abstract": "We present a new dataset containing 10K human-annotated games of Go and show how these natural language annotations can be used as a tool for model interpretability. Given a board state and its associated comment, our approach uses linear probing to predict mentions of domain-specific terms (e.g., ko, atari) from the intermediate state representations of game-playing agents like AlphaGo Zero. We find these game concepts are nontrivially encoded in two distinct policy networks, one trained via imitation learning and another trained via reinforcement learning. 
Furthermore, mentions of domain-specific terms are most easily predicted from the later layers of both models, suggesting that these policy networks encode high-level abstractions similar to those used in the natural language annotations.", "doc_id": "8a3d98bd2a52659b6d04eda2272bc325", "publication_year": 2022, "sentences": ["we present a new dataset containing 10k human - annotated games of go and show how these natural language annotations can be used as a tool for model interpretability .", "given a board state and its associated comment , our approach uses linear probing to predict mentions of domain - specific terms ( e . g . , ko , atari ) from the intermediate state representations of game - playing agents like alphago zero .", "we find these game concepts are nontrivially encoded in two distinct policy networks , one trained via imitation learning and another trained via reinforcement learning .", "furthermore , mentions of domain - specific terms are most easily predicted from the later layers of both models , suggesting that these policy networks encode high - level abstractions similar to those used in the natural language annotations ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset"], "offsets": [4]}, {"text": "go and show", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["go", "and", "show"], "offsets": [12, 13, 14]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "natural language annotations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["natural", "language", "annotations"], "offsets": [17, 18, 19]}], "trigger": {"text": "go and show", "tokens": ["go", "and", "show"], "offsets": [12, 13, 14]}}, {"event_type": "MDS", "arguments": [{"text": "mentions of domain - 
specific terms", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["mentions", "of", "domain", "-", "specific", "terms"], "offsets": [46, 47, 48, 49, 50, 51]}, {"text": "intermediate state representations of game - playing agents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["intermediate", "state", "representations", "of", "game", "-", "playing", "agents"], "offsets": [64, 65, 66, 67, 68, 69, 70, 71]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [45]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [76]}, {"text": "encoded", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["encoded"], "offsets": [83]}, {"text": "trained", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["trained"], "offsets": [91]}, {"text": "trained", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["trained"], "offsets": [97]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [77]}}, {"event_type": "FAC", "arguments": [{"text": "in two distinct policy networks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "two", "distinct", "policy", "networks"], "offsets": [84, 85, 86, 87, 88]}, {"text": "game concepts", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["game", "concepts"], "offsets": [79, 80]}], "trigger": {"text": "encoded", "tokens": ["encoded"], "offsets": [83]}}, {"event_type": "FAC", "arguments": [{"text": "game concepts", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["game", "concepts"], "offsets": [79, 80]}, {"text": "via imitation learning", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "imitation", "learning"], "offsets": [92, 93, 94]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [91]}}, {"event_type": "FAC", "arguments": [{"text": "via reinforcement learning", "nugget_type": "LIM", 
"argument_type": "Condition", "tokens": ["via", "reinforcement", "learning"], "offsets": [98, 99, 100]}, {"text": "game concepts", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["game", "concepts"], "offsets": [79, 80]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [97]}}, {"event_type": "FAC", "arguments": [{"text": "mentions of domain - specific terms", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["mentions", "of", "domain", "-", "specific", "terms"], "offsets": [104, 105, 106, 107, 108, 109]}, {"text": "from the later layers of both models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "later", "layers", "of", "both", "models"], "offsets": [114, 115, 116, 117, 118, 119, 120]}], "trigger": {"text": "predicted", "tokens": ["predicted"], "offsets": [113]}}, {"event_type": "FAC", "arguments": [{"text": "policy networks encode high - level abstractions", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["policy", "networks", "encode", "high", "-", "level", "abstractions"], "offsets": [125, 126, 127, 128, 129, 130, 131]}, {"text": "those", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["abstractions"], "offsets": [131]}, {"text": "used in the natural language annotations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["used", "in", "the", "natural", "language", "annotations"], "offsets": [135, 136, 137, 138, 139, 140]}], "trigger": {"text": "similar", "tokens": ["similar"], "offsets": [132]}}], "document": ["we", "present", "a", "new", "dataset", "containing", "10k", "human", "-", "annotated", "games", "of", "go", "and", "show", "how", "these", "natural", "language", "annotations", "can", "be", "used", "as", "a", "tool", "for", "model", "interpretability", ".", "given", "a", "board", "state", "and", "its", "associated", "comment", ",", "our", "approach", "uses", "linear", "probing", "to", "predict", "mentions", "of", "domain", "-", "specific", 
"terms", "(", "e", ".", "g", ".", ",", "ko", ",", "atari", ")", "from", "the", "intermediate", "state", "representations", "of", "game", "-", "playing", "agents", "like", "alphago", "zero", ".", "we", "find", "these", "game", "concepts", "are", "nontrivially", "encoded", "in", "two", "distinct", "policy", "networks", ",", "one", "trained", "via", "imitation", "learning", "and", "another", "trained", "via", "reinforcement", "learning", ".", "furthermore", ",", "mentions", "of", "domain", "-", "specific", "terms", "are", "most", "easily", "predicted", "from", "the", "later", "layers", "of", "both", "models", ",", "suggesting", "that", "these", "policy", "networks", "encode", "high", "-", "level", "abstractions", "similar", "to", "those", "used", "in", "the", "natural", "language", "annotations", "."]}, {"venue": "ACL", "title": "From SPMRL to NMRL: What Did We Learn (and Unlearn) in a Decade of Parsing Morphologically-Rich Languages (MRLs)?", "abstract": "It has been exactly a decade since the first establishment of SPMRL, a research initiative unifying multiple research efforts to address the peculiar challenges of Statistical Parsing for Morphologically-Rich Languages (MRLs). Here we reflect on parsing MRLs in that decade, highlight the solutions and lessons learned for the architectural, modeling and lexical challenges in the pre-neural era, and argue that similar challenges re-emerge in neural architectures for MRLs. We then aim to offer a climax, suggesting that incorporating symbolic ideas proposed in SPMRL terms into nowadays neural architectures has the potential to push NLP for MRLs to a new level. 
We sketch a strategies for designing Neural Models for MRLs (NMRL), and showcase preliminary support for these strategies via investigating the task of multi-tagging in Hebrew, a morphologically-rich, high-fusion, language.", "doc_id": "590670f55a388c4ee1dd02f5a91190de", "publication_year": 2020, "sentences": ["it has been exactly a decade since the first establishment of spmrl , a research initiative unifying multiple research efforts to address the peculiar challenges of statistical parsing for morphologically - rich languages ( mrls ) .", "here we reflect on parsing mrls in that decade , highlight the solutions and lessons learned for the architectural , modeling and lexical challenges in the pre - neural era , and argue that similar challenges re - emerge in neural architectures for mrls .", "we then aim to offer a climax , suggesting that incorporating symbolic ideas proposed in spmrl terms into nowadays neural architectures has the potential to push nlp for mrls to a new level .", "we sketch a strategies for designing neural models for mrls ( nmrl ) , and showcase preliminary support for these strategies via investigating the task of multi - tagging in hebrew , a morphologically - rich , high - fusion , language ."], "events": [{"event_type": "ITT", "arguments": [{"text": "morphologically - rich languages", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["mrls"], "offsets": [34]}], "trigger": {"text": "parsing", "tokens": ["parsing"], "offsets": [27]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [38]}, {"text": "parsing mrls", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["parsing", "mrls"], "offsets": [41, 42]}], "trigger": {"text": "reflect", "tokens": ["reflect"], "offsets": [39]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [38]}, {"text": "solutions and 
lessons", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["solutions", "and", "lessons"], "offsets": [49, 50, 51]}, {"text": "architectural , modeling and lexical challenges", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["architectural", ",", "modeling", "and", "lexical", "challenges"], "offsets": [55, 56, 57, 58, 59, 60]}], "trigger": {"text": "highlight", "tokens": ["highlight"], "offsets": [47]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [82]}, {"text": "symbolic ideas", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["symbolic", "ideas"], "offsets": [93, 94]}, {"text": "push", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["push"], "offsets": [107]}], "trigger": {"text": "incorporating", "tokens": ["incorporating"], "offsets": [92]}}, {"event_type": "PUR", "arguments": [{"text": "nlp for mrls to a new level", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["nlp", "for", "mrls", "to", "a", "new", "level"], "offsets": [108, 109, 110, 111, 112, 113, 114]}], "trigger": {"text": "push", "tokens": ["push"], "offsets": [107]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [116]}, {"text": "strategies for designing neural models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["strategies", "for", "designing", "neural", "models"], "offsets": [119, 120, 121, 122, 123]}, {"text": "mrls", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["mrls"], "offsets": [125]}], "trigger": {"text": "sketch", "tokens": ["sketch"], "offsets": [117]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [116]}, {"text": "preliminary support", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["preliminary", "support"], "offsets": [132, 
133]}, {"text": "strategies", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["strategies"], "offsets": [136]}, {"text": "investigating the task of multi - tagging in hebrew", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["investigating", "the", "task", "of", "multi", "-", "tagging", "in", "hebrew"], "offsets": [138, 139, 140, 141, 142, 143, 144, 145, 146]}], "trigger": {"text": "showcase", "tokens": ["showcase"], "offsets": [131]}}], "document": ["it", "has", "been", "exactly", "a", "decade", "since", "the", "first", "establishment", "of", "spmrl", ",", "a", "research", "initiative", "unifying", "multiple", "research", "efforts", "to", "address", "the", "peculiar", "challenges", "of", "statistical", "parsing", "for", "morphologically", "-", "rich", "languages", "(", "mrls", ")", ".", "here", "we", "reflect", "on", "parsing", "mrls", "in", "that", "decade", ",", "highlight", "the", "solutions", "and", "lessons", "learned", "for", "the", "architectural", ",", "modeling", "and", "lexical", "challenges", "in", "the", "pre", "-", "neural", "era", ",", "and", "argue", "that", "similar", "challenges", "re", "-", "emerge", "in", "neural", "architectures", "for", "mrls", ".", "we", "then", "aim", "to", "offer", "a", "climax", ",", "suggesting", "that", "incorporating", "symbolic", "ideas", "proposed", "in", "spmrl", "terms", "into", "nowadays", "neural", "architectures", "has", "the", "potential", "to", "push", "nlp", "for", "mrls", "to", "a", "new", "level", ".", "we", "sketch", "a", "strategies", "for", "designing", "neural", "models", "for", "mrls", "(", "nmrl", ")", ",", "and", "showcase", "preliminary", "support", "for", "these", "strategies", "via", "investigating", "the", "task", "of", "multi", "-", "tagging", "in", "hebrew", ",", "a", "morphologically", "-", "rich", ",", "high", "-", "fusion", ",", "language", "."]}, {"venue": "ACL", "title": "Probing for Referential Information in Language Models", "abstract": "Language models keep 
track of complex information about the preceding context \u2013 including, e.g., syntactic relations in a sentence. We investigate whether they also capture information beneficial for resolving pronominal anaphora in English. We analyze two state of the art models with LSTM and Transformer architectures, via probe tasks and analysis on a coreference annotated corpus. The Transformer outperforms the LSTM in all analyses. Our results suggest that language models are more successful at learning grammatical constraints than they are at learning truly referential information, in the sense of capturing the fact that we use language to refer to entities in the world. However, we find traces of the latter aspect, too.", "doc_id": "2ed7b3a5ba47d8f63b2198bc635b8528", "publication_year": 2020, "sentences": ["language models keep track of complex information about the preceding context \u2013 including , e . g . , syntactic relations in a sentence .", "we investigate whether they also capture information beneficial for resolving pronominal anaphora in english .", "we analyze two state of the art models with lstm and transformer architectures , via probe tasks and analysis on a coreference annotated corpus .", "the transformer outperforms the lstm in all analyses .", "our results suggest that language models are more successful at learning grammatical constraints than they are at learning truly referential information , in the sense of capturing the fact that we use language to refer to entities in the world .", "however , we find traces of the latter aspect , too ."], "events": [{"event_type": "ITT", "arguments": [{"text": "language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["language", "models"], "offsets": [0, 1]}], "trigger": {"text": "keep", "tokens": ["keep"], "offsets": [2]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [25]}, {"text": "capture", 
"nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [30]}, {"text": "resolving", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["resolving"], "offsets": [34]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [26]}}, {"event_type": "PUR", "arguments": [{"text": "information beneficial", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["information", "beneficial"], "offsets": [31, 32]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [30]}}, {"event_type": "PUR", "arguments": [{"text": "pronominal anaphora", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["pronominal", "anaphora"], "offsets": [35, 36]}, {"text": "in english", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "english"], "offsets": [37, 38]}], "trigger": {"text": "resolving", "tokens": ["resolving"], "offsets": [34]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [40]}, {"text": "two state of the art models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["two", "state", "of", "the", "art", "models"], "offsets": [42, 43, 44, 45, 46, 47]}, {"text": "lstm architectures", "nugget_type": "APP", "argument_type": "Content", "tokens": ["lstm", "architectures"], "offsets": [49, 52]}, {"text": "transformer architectures", "nugget_type": "APP", "argument_type": "Content", "tokens": ["transformer", "architectures"], "offsets": [51, 52]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [41]}}, {"event_type": "CMP", "arguments": [{"text": "transformer", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["transformer"], "offsets": [66]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [67]}, {"text": "lstm", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["lstm"], "offsets": [69]}], "trigger": 
{"text": "outperforms", "tokens": ["outperforms"], "offsets": [67]}}, {"event_type": "FIN", "arguments": [{"text": "learning", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["learning"], "offsets": [84]}, {"text": "refer", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["refer"], "offsets": [108]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [76]}}, {"event_type": "CMP", "arguments": [{"text": "more successful", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "successful"], "offsets": [81, 82]}, {"text": "grammatical constraints", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["grammatical", "constraints"], "offsets": [85, 86]}, {"text": "truly referential information", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["truly", "referential", "information"], "offsets": [92, 93, 94]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [84]}}, {"event_type": "FAC", "arguments": [{"text": "in the sense of capturing the fact", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "sense", "of", "capturing", "the", "fact"], "offsets": [96, 97, 98, 99, 100, 101, 102]}, {"text": "language", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["language"], "offsets": [106]}, {"text": "entities in the world", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["entities", "in", "the", "world"], "offsets": [110, 111, 112, 113]}], "trigger": {"text": "refer", "tokens": ["refer"], "offsets": [108]}}], "document": ["language", "models", "keep", "track", "of", "complex", "information", "about", "the", "preceding", "context", "\u2013", "including", ",", "e", ".", "g", ".", ",", "syntactic", "relations", "in", "a", "sentence", ".", "we", "investigate", "whether", "they", "also", "capture", "information", "beneficial", "for", "resolving", "pronominal", "anaphora", "in", "english", ".", "we", "analyze", "two", "state", "of", "the", "art", 
"models", "with", "lstm", "and", "transformer", "architectures", ",", "via", "probe", "tasks", "and", "analysis", "on", "a", "coreference", "annotated", "corpus", ".", "the", "transformer", "outperforms", "the", "lstm", "in", "all", "analyses", ".", "our", "results", "suggest", "that", "language", "models", "are", "more", "successful", "at", "learning", "grammatical", "constraints", "than", "they", "are", "at", "learning", "truly", "referential", "information", ",", "in", "the", "sense", "of", "capturing", "the", "fact", "that", "we", "use", "language", "to", "refer", "to", "entities", "in", "the", "world", ".", "however", ",", "we", "find", "traces", "of", "the", "latter", "aspect", ",", "too", "."]}, {"venue": "ACL", "title": "Under the Morphosyntactic Lens: A Multifaceted Evaluation of Gender Bias in Speech Translation", "abstract": "Gender bias is largely recognized as a problematic phenomenon affecting language technologies, with recent studies underscoring that it might surface differently across languages. However, most of current evaluation practices adopt a word-level focus on a narrow set of occupational nouns under synthetic conditions. Such protocols overlook key features of grammatical gender languages, which are characterized by morphosyntactic chains of gender agreement, marked on a variety of lexical items and parts-of-speech (POS). To overcome this limitation, we enrich the natural, gender-sensitive MuST-SHE corpus (Bentivogli et al., 2020) with two new linguistic annotation layers (POS and agreement chains), and explore to what extent different lexical categories and agreement phenomena are impacted by gender skews. Focusing on speech translation, we conduct a multifaceted evaluation on three language directions (English-French/Italian/Spanish), with models trained on varying amounts of data and different word segmentation techniques. 
By shedding light on model behaviours, gender bias, and its detection at several levels of granularity, our findings emphasize the value of dedicated analyses beyond aggregated overall results.", "doc_id": "c098562a1516a4200efbc7569f9b37f5", "publication_year": 2022, "sentences": ["gender bias is largely recognized as a problematic phenomenon affecting language technologies , with recent studies underscoring that it might surface differently across languages .", "however , most of current evaluation practices adopt a word - level focus on a narrow set of occupational nouns under synthetic conditions .", "such protocols overlook key features of grammatical gender languages , which are characterized by morphosyntactic chains of gender agreement , marked on a variety of lexical items and parts - of - speech ( pos ) .", "to overcome this limitation , we enrich the natural , gender - sensitive must - she corpus ( bentivogli et al . , 2020 ) with two new linguistic annotation layers ( pos and agreement chains ) , and explore to what extent different lexical categories and agreement phenomena are impacted by gender skews .", "focusing on speech translation , we conduct a multifaceted evaluation on three language directions ( english - french / italian / spanish ) , with models trained on varying amounts of data and different word segmentation techniques .", "by shedding light on model behaviours , gender bias , and its detection at several levels of granularity , our findings emphasize the value of dedicated analyses beyond aggregated overall results ."], "events": [{"event_type": "ITT", "arguments": [{"text": "gender bias", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["gender", "bias"], "offsets": [0, 1]}], "trigger": {"text": "largely recognized", "tokens": ["largely", "recognized"], "offsets": [3, 4]}}, {"event_type": "RWF", "arguments": [{"text": "overlook", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["overlook"], "offsets": [51]}, 
{"text": "current evaluation practices", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["current", "evaluation", "practices"], "offsets": [29, 30, 31]}], "trigger": {"text": "overlook", "tokens": ["overlook"], "offsets": [51]}}, {"event_type": "MDS", "arguments": [{"text": "overcome", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["overcome"], "offsets": [87]}, {"text": "two new linguistic annotation layers", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["two", "new", "linguistic", "annotation", "layers"], "offsets": [112, 113, 114, 115, 116]}, {"text": "natural , gender - sensitive must - she corpus", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["natural", ",", "gender", "-", "sensitive", "must", "-", "she", "corpus"], "offsets": [94, 95, 96, 97, 98, 99, 100, 101, 102]}], "trigger": {"text": "enrich", "tokens": ["enrich"], "offsets": [92]}}, {"event_type": "WKS", "arguments": [{"text": "overcome", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["overcome"], "offsets": [87]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [91]}, {"text": "impacted", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["impacted"], "offsets": [136]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [125]}}, {"event_type": "PUR", "arguments": [{"text": "such protocols overlook key features of grammatical gender languages", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["such", "protocols", "overlook", "key", "features", "of", "grammatical", "gender", "languages"], "offsets": [49, 50, 51, 52, 53, 54, 55, 56, 57]}], "trigger": {"text": "overcome", "tokens": ["overcome"], "offsets": [87]}}, {"event_type": "PUR", "arguments": [{"text": "extent different lexical categories", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["extent", "different", "lexical", "categories"], "offsets": [128, 129, 130, 131]}, 
{"text": "agreement phenomena", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["agreement", "phenomena"], "offsets": [133, 134]}], "trigger": {"text": "impacted", "tokens": ["impacted"], "offsets": [136]}}, {"event_type": "WKS", "arguments": [{"text": "focusing on speech translation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["focusing", "on", "speech", "translation"], "offsets": [141, 142, 143, 144]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [146]}, {"text": "multifaceted evaluation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["multifaceted", "evaluation"], "offsets": [149, 150]}, {"text": "on three language directions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "three", "language", "directions"], "offsets": [151, 152, 153, 154]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [147]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [146]}, {"text": "models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["models"], "offsets": [166]}, {"text": "on varying amounts of data and different word segmentation techniques", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "varying", "amounts", "of", "data", "and", "different", "word", "segmentation", "techniques"], "offsets": [168, 169, 170, 171, 172, 173, 174, 175, 176, 177]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [167]}}, {"event_type": "FAC", "arguments": [{"text": "value of dedicated analyses beyond aggregated overall results", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["value", "of", "dedicated", "analyses", "beyond", "aggregated", "overall", "results"], "offsets": [202, 203, 204, 205, 206, 207, 208, 209]}], "trigger": {"text": "emphasize", "tokens": ["emphasize"], "offsets": [200]}}], "document": ["gender", 
"bias", "is", "largely", "recognized", "as", "a", "problematic", "phenomenon", "affecting", "language", "technologies", ",", "with", "recent", "studies", "underscoring", "that", "it", "might", "surface", "differently", "across", "languages", ".", "however", ",", "most", "of", "current", "evaluation", "practices", "adopt", "a", "word", "-", "level", "focus", "on", "a", "narrow", "set", "of", "occupational", "nouns", "under", "synthetic", "conditions", ".", "such", "protocols", "overlook", "key", "features", "of", "grammatical", "gender", "languages", ",", "which", "are", "characterized", "by", "morphosyntactic", "chains", "of", "gender", "agreement", ",", "marked", "on", "a", "variety", "of", "lexical", "items", "and", "parts", "-", "of", "-", "speech", "(", "pos", ")", ".", "to", "overcome", "this", "limitation", ",", "we", "enrich", "the", "natural", ",", "gender", "-", "sensitive", "must", "-", "she", "corpus", "(", "bentivogli", "et", "al", ".", ",", "2020", ")", "with", "two", "new", "linguistic", "annotation", "layers", "(", "pos", "and", "agreement", "chains", ")", ",", "and", "explore", "to", "what", "extent", "different", "lexical", "categories", "and", "agreement", "phenomena", "are", "impacted", "by", "gender", "skews", ".", "focusing", "on", "speech", "translation", ",", "we", "conduct", "a", "multifaceted", "evaluation", "on", "three", "language", "directions", "(", "english", "-", "french", "/", "italian", "/", "spanish", ")", ",", "with", "models", "trained", "on", "varying", "amounts", "of", "data", "and", "different", "word", "segmentation", "techniques", ".", "by", "shedding", "light", "on", "model", "behaviours", ",", "gender", "bias", ",", "and", "its", "detection", "at", "several", "levels", "of", "granularity", ",", "our", "findings", "emphasize", "the", "value", "of", "dedicated", "analyses", "beyond", "aggregated", "overall", "results", "."]}, {"venue": "ACL", "title": "Ditch the Gold Standard: Re-evaluating Conversational Question 
Answering", "abstract": "Conversational question answering aims to provide natural-language answers to users in information-seeking conversations. Existing conversational QA benchmarks compare models with pre-collected human-human conversations, using ground-truth answers provided in conversational history. It remains unclear whether we can rely on this static evaluation for model development and whether current systems can well generalize to real-world human-machine conversations. In this work, we conduct the first large-scale human evaluation of state-of-the-art conversational QA systems, where human evaluators converse with models and judge the correctness of their answers. We find that the distribution of human machine conversations differs drastically from that of human-human conversations, and there is a disagreement between human and gold-history evaluation in terms of model ranking. We further investigate how to improve automatic evaluations, and propose a question rewriting mechanism based on predicted history, which better correlates with human judgments. 
Finally, we analyze the impact of various modeling strategies and discuss future directions towards building better conversational question answering systems.", "doc_id": "c63df2992a3f61e2d619368b41826418", "publication_year": 2022, "sentences": ["conversational question answering aims to provide natural - language answers to users in information - seeking conversations .", "existing conversational qa benchmarks compare models with pre - collected human - human conversations , using ground - truth answers provided in conversational history .", "it remains unclear whether we can rely on this static evaluation for model development and whether current systems can well generalize to real - world human - machine conversations .", "in this work , we conduct the first large - scale human evaluation of state - of - the - art conversational qa systems , where human evaluators converse with models and judge the correctness of their answers .", "we find that the distribution of human machine conversations differs drastically from that of human - human conversations , and there is a disagreement between human and gold - history evaluation in terms of model ranking .", "we further investigate how to improve automatic evaluations , and propose a question rewriting mechanism based on predicted history , which better correlates with human judgments .", "finally , we analyze the impact of various modeling strategies and discuss future directions towards building better conversational question answering systems ."], "events": [{"event_type": "ITT", "arguments": [{"text": "conversational question answering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["conversational", "question", "answering"], "offsets": [0, 1, 2]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [5]}}, {"event_type": "RWS", "arguments": [{"text": "existing conversational qa benchmarks", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "conversational", 
"qa", "benchmarks"], "offsets": [18, 19, 20, 21]}, {"text": "models with pre - collected human - human conversations", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["models", "with", "pre", "-", "collected", "human", "-", "human", "conversations"], "offsets": [23, 24, 25, 26, 27, 28, 29, 30, 31]}, {"text": "ground - truth answers provided in conversational history", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["ground", "-", "truth", "answers", "provided", "in", "conversational", "history"], "offsets": [34, 35, 36, 37, 38, 39, 40, 41]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [22]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [77]}, {"text": "large - scale human evaluation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["large", "-", "scale", "human", "evaluation"], "offsets": [81, 82, 83, 84, 85]}, {"text": "state - of - the - art conversational qa systems", "nugget_type": "APP", "argument_type": "Target", "tokens": ["state", "-", "of", "-", "the", "-", "art", "conversational", "qa", "systems"], "offsets": [87, 88, 89, 90, 91, 92, 93, 94, 95, 96]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [78]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [112]}, {"text": "differs", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["differs"], "offsets": [121]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [113]}}, {"event_type": "CMP", "arguments": [{"text": "that of human - human conversations", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["distribution", "of", "human", "-", "human", "conversations"], "offsets": [116, 125, 126, 127, 128, 129]}, {"text": "distribution of human machine conversations", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": 
["distribution", "of", "human", "machine", "conversations"], "offsets": [116, 117, 118, 119, 120]}, {"text": "drastically", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["drastically"], "offsets": [122]}], "trigger": {"text": "differs", "tokens": ["differs"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "in terms of model ranking", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "terms", "of", "model", "ranking"], "offsets": [143, 144, 145, 146, 147]}, {"text": "disagreement between human and gold - history evaluation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["disagreement", "between", "human", "and", "gold", "-", "history", "evaluation"], "offsets": [135, 136, 137, 138, 139, 140, 141, 142]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [113]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [149]}, {"text": "how to improve automatic evaluations", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["how", "to", "improve", "automatic", "evaluations"], "offsets": [152, 153, 154, 155, 156]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [151]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [149]}, {"text": "question rewriting mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["question", "rewriting", "mechanism"], "offsets": [161, 162, 163]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [159]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [178]}, {"text": "impact of various modeling strategies", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["impact", "of", "various", "modeling", "strategies"], "offsets": [181, 182, 183, 184, 185]}], 
"trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [179]}}, {"event_type": "WKS", "arguments": [{"text": "future directions", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["future", "directions"], "offsets": [188, 189]}, {"text": "building", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["building"], "offsets": [191]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [178]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [187]}}, {"event_type": "PUR", "arguments": [{"text": "better conversational question answering systems", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["better", "conversational", "question", "answering", "systems"], "offsets": [192, 193, 194, 195, 196]}], "trigger": {"text": "building", "tokens": ["building"], "offsets": [191]}}], "document": ["conversational", "question", "answering", "aims", "to", "provide", "natural", "-", "language", "answers", "to", "users", "in", "information", "-", "seeking", "conversations", ".", "existing", "conversational", "qa", "benchmarks", "compare", "models", "with", "pre", "-", "collected", "human", "-", "human", "conversations", ",", "using", "ground", "-", "truth", "answers", "provided", "in", "conversational", "history", ".", "it", "remains", "unclear", "whether", "we", "can", "rely", "on", "this", "static", "evaluation", "for", "model", "development", "and", "whether", "current", "systems", "can", "well", "generalize", "to", "real", "-", "world", "human", "-", "machine", "conversations", ".", "in", "this", "work", ",", "we", "conduct", "the", "first", "large", "-", "scale", "human", "evaluation", "of", "state", "-", "of", "-", "the", "-", "art", "conversational", "qa", "systems", ",", "where", "human", "evaluators", "converse", "with", "models", "and", "judge", "the", "correctness", "of", "their", "answers", ".", "we", "find", "that", "the", "distribution", "of", "human", "machine", 
"conversations", "differs", "drastically", "from", "that", "of", "human", "-", "human", "conversations", ",", "and", "there", "is", "a", "disagreement", "between", "human", "and", "gold", "-", "history", "evaluation", "in", "terms", "of", "model", "ranking", ".", "we", "further", "investigate", "how", "to", "improve", "automatic", "evaluations", ",", "and", "propose", "a", "question", "rewriting", "mechanism", "based", "on", "predicted", "history", ",", "which", "better", "correlates", "with", "human", "judgments", ".", "finally", ",", "we", "analyze", "the", "impact", "of", "various", "modeling", "strategies", "and", "discuss", "future", "directions", "towards", "building", "better", "conversational", "question", "answering", "systems", "."]}, {"venue": "ACL", "title": "The Language of Legal and Illegal Activity on the Darknet", "abstract": "The non-indexed parts of the Internet (the Darknet) have become a haven for both legal and illegal anonymous activity. Given the magnitude of these networks, scalably monitoring their activity necessarily relies on automated tools, and notably on NLP tools. However, little is known about what characteristics texts communicated through the Darknet have, and how well do off-the-shelf NLP tools do on this domain. This paper tackles this gap and performs an in-depth investigation of the characteristics of legal and illegal text in the Darknet, comparing it to a clear net website with similar content as a control condition. 
Taking drugs-related websites as a test case, we find that texts for selling legal and illegal drugs have several linguistic characteristics that distinguish them from one another, as well as from the control condition, among them the distribution of POS tags, and the coverage of their named entities in Wikipedia.", "doc_id": "d5072c2c5c45fb572207ca2f56eede9d", "publication_year": 2019, "sentences": ["the non - indexed parts of the internet ( the darknet ) have become a haven for both legal and illegal anonymous activity .", "given the magnitude of these networks , scalably monitoring their activity necessarily relies on automated tools , and notably on nlp tools .", "however , little is known about what characteristics texts communicated through the darknet have , and how well do off - the - shelf nlp tools do on this domain .", "this paper tackles this gap and performs an in - depth investigation of the characteristics of legal and illegal text in the darknet , comparing it to a clear net website with similar content as a control condition .", "taking drugs - related websites as a test case , we find that texts for selling legal and illegal drugs have several linguistic characteristics that distinguish them from one another , as well as from the control condition , among them the distribution of pos tags , and the coverage of their named entities in wikipedia ."], "events": [{"event_type": "ITT", "arguments": [{"text": "non - indexed parts of the internet", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["non", "-", "indexed", "parts", "of", "the", "internet"], "offsets": [1, 2, 3, 4, 5, 6, 7]}], "trigger": {"text": "become", "tokens": ["become"], "offsets": [13]}}, {"event_type": "WKS", "arguments": [{"text": "in the darknet", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "darknet"], "offsets": [98, 99, 100]}, {"text": "in - depth investigation of the characteristics of legal text", "nugget_type": "TAK", "argument_type": 
"Content", "tokens": ["in", "-", "depth", "investigation", "of", "the", "characteristics", "of", "legal", "text"], "offsets": [86, 87, 88, 89, 90, 91, 92, 93, 94, 97]}, {"text": "illegal text", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["illegal", "text"], "offsets": [96, 97]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [84]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [127]}, {"text": "have", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["have"], "offsets": [137]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [128]}}, {"event_type": "FAC", "arguments": [{"text": "linguistic characteristics", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["linguistic", "characteristics"], "offsets": [139, 140]}, {"text": "texts for selling legal drugs", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["texts", "for", "selling", "legal", "drugs"], "offsets": [130, 131, 132, 133, 136]}, {"text": "illegal drugs", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["illegal", "drugs"], "offsets": [135, 136]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [137]}}, {"event_type": "MDS", "arguments": [{"text": "clear net website with similar content", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["clear", "net", "website", "with", "similar", "content"], "offsets": [106, 107, 108, 109, 110, 111]}, {"text": "characteristics of legal text", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["characteristics", "of", "legal", "text"], "offsets": [92, 93, 94, 97]}, {"text": "illegal text", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["illegal", "text"], "offsets": [96, 97]}], "trigger": {"text": "comparing", "tokens": ["comparing"], "offsets": [102]}}], "document": ["the", "non", "-", "indexed", "parts", "of", "the", "internet", 
"(", "the", "darknet", ")", "have", "become", "a", "haven", "for", "both", "legal", "and", "illegal", "anonymous", "activity", ".", "given", "the", "magnitude", "of", "these", "networks", ",", "scalably", "monitoring", "their", "activity", "necessarily", "relies", "on", "automated", "tools", ",", "and", "notably", "on", "nlp", "tools", ".", "however", ",", "little", "is", "known", "about", "what", "characteristics", "texts", "communicated", "through", "the", "darknet", "have", ",", "and", "how", "well", "do", "off", "-", "the", "-", "shelf", "nlp", "tools", "do", "on", "this", "domain", ".", "this", "paper", "tackles", "this", "gap", "and", "performs", "an", "in", "-", "depth", "investigation", "of", "the", "characteristics", "of", "legal", "and", "illegal", "text", "in", "the", "darknet", ",", "comparing", "it", "to", "a", "clear", "net", "website", "with", "similar", "content", "as", "a", "control", "condition", ".", "taking", "drugs", "-", "related", "websites", "as", "a", "test", "case", ",", "we", "find", "that", "texts", "for", "selling", "legal", "and", "illegal", "drugs", "have", "several", "linguistic", "characteristics", "that", "distinguish", "them", "from", "one", "another", ",", "as", "well", "as", "from", "the", "control", "condition", ",", "among", "them", "the", "distribution", "of", "pos", "tags", ",", "and", "the", "coverage", "of", "their", "named", "entities", "in", "wikipedia", "."]}, {"venue": "ACL", "title": "On the Inference Calibration of Neural Machine Translation", "abstract": "Confidence calibration, which aims to make model predictions equal to the true correctness measures, is important for neural machine translation (NMT) because it is able to offer useful indicators of translation errors in the generated output. 
While prior studies have shown that NMT models trained with label smoothing are well-calibrated on the ground-truth training data, we find that miscalibration still remains a severe challenge for NMT during inference due to the discrepancy between training and inference. By carefully designing experiments on three language pairs, our work provides in-depth analyses of the correlation between calibration and translation performance as well as linguistic properties of miscalibration and reports a number of interesting findings that might help humans better analyze, understand and improve NMT models. Based on these observations, we further propose a new graduated label smoothing method that can improve both inference calibration and translation performance.", "doc_id": "f5905f6cb37645359dbaadfa36d665bf", "publication_year": 2020, "sentences": ["confidence calibration , which aims to make model predictions equal to the true correctness measures , is important for neural machine translation ( nmt ) because it is able to offer useful indicators of translation errors in the generated output .", "while prior studies have shown that nmt models trained with label smoothing are well - calibrated on the ground - truth training data , we find that miscalibration still remains a severe challenge for nmt during inference due to the discrepancy between training and inference .", "by carefully designing experiments on three language pairs , our work provides in - depth analyses of the correlation between calibration and translation performance as well as linguistic properties of miscalibration and reports a number of interesting findings that might help humans better analyze , understand and improve nmt models .", "based on these observations , we further propose a new graduated label smoothing method that can improve both inference calibration and translation performance ."], "events": [{"event_type": "ITT", "arguments": [{"text": "confidence calibration", "nugget_type": "TAK", 
"argument_type": "Target", "tokens": ["confidence", "calibration"], "offsets": [0, 1]}], "trigger": {"text": "make", "tokens": ["make"], "offsets": [6]}}, {"event_type": "WKS", "arguments": [{"text": "correlation between calibration and translation performance", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["correlation", "between", "calibration", "and", "translation", "performance"], "offsets": [105, 106, 107, 108, 109, 110]}, {"text": "linguistic properties of miscalibration", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["linguistic", "properties", "of", "miscalibration"], "offsets": [114, 115, 116, 117]}], "trigger": {"text": "analyses", "tokens": ["analyses"], "offsets": [102]}}, {"event_type": "WKS", "arguments": [{"text": "interesting findings", "nugget_type": "APP", "argument_type": "Content", "tokens": ["interesting", "findings"], "offsets": [123, 124]}, {"text": "analyze , understand and improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["analyze", ",", "understand", "and", "improve"], "offsets": [130, 131, 132, 133, 134]}], "trigger": {"text": "reports", "tokens": ["reports"], "offsets": [119]}}, {"event_type": "PUR", "arguments": [{"text": "nmt models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["neural", "machine", "translation", "models"], "offsets": [19, 20, 21, 136]}], "trigger": {"text": "analyze , understand and improve", "tokens": ["analyze", ",", "understand", "and", "improve"], "offsets": [130, 131, 132, 133, 134]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [143]}, {"text": "graduated label smoothing method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["graduated", "label", "smoothing", "method"], "offsets": [148, 149, 150, 151]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [154]}], "trigger": {"text": "propose", 
"tokens": ["propose"], "offsets": [145]}}, {"event_type": "PUR", "arguments": [{"text": "inference calibration and translation performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["inference", "calibration", "and", "translation", "performance"], "offsets": [156, 157, 158, 159, 160]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [154]}}], "document": ["confidence", "calibration", ",", "which", "aims", "to", "make", "model", "predictions", "equal", "to", "the", "true", "correctness", "measures", ",", "is", "important", "for", "neural", "machine", "translation", "(", "nmt", ")", "because", "it", "is", "able", "to", "offer", "useful", "indicators", "of", "translation", "errors", "in", "the", "generated", "output", ".", "while", "prior", "studies", "have", "shown", "that", "nmt", "models", "trained", "with", "label", "smoothing", "are", "well", "-", "calibrated", "on", "the", "ground", "-", "truth", "training", "data", ",", "we", "find", "that", "miscalibration", "still", "remains", "a", "severe", "challenge", "for", "nmt", "during", "inference", "due", "to", "the", "discrepancy", "between", "training", "and", "inference", ".", "by", "carefully", "designing", "experiments", "on", "three", "language", "pairs", ",", "our", "work", "provides", "in", "-", "depth", "analyses", "of", "the", "correlation", "between", "calibration", "and", "translation", "performance", "as", "well", "as", "linguistic", "properties", "of", "miscalibration", "and", "reports", "a", "number", "of", "interesting", "findings", "that", "might", "help", "humans", "better", "analyze", ",", "understand", "and", "improve", "nmt", "models", ".", "based", "on", "these", "observations", ",", "we", "further", "propose", "a", "new", "graduated", "label", "smoothing", "method", "that", "can", "improve", "both", "inference", "calibration", "and", "translation", "performance", "."]}, {"venue": "ACL", "title": "Internet-Augmented Dialogue Generation", "abstract": "The largest 
store of continually updating knowledge on our planet can be accessed via internet search. In this work we study giving access to this information to conversational agents. Large language models, even though they store an impressive amount of knowledge within their weights, are known to hallucinate facts when generating dialogue (Shuster et al., 2021); moreover, those facts are frozen in time at the point of model training. In contrast, we propose an approach that learns to generate an internet search query based on the context, and then conditions on the search results to finally generate a response, a method that can employ up-to-the-minute relevant information. We train and evaluate such models on a newly collected dataset of human-human conversations whereby one of the speakers is given access to internet search during knowledgedriven discussions in order to ground their responses. We find that search-query based access of the internet in conversation provides superior performance compared to existing approaches that either use no augmentation or FAISS-based retrieval (Lewis et al., 2020b).", "doc_id": "548ed968041db09172c543bacd9295e4", "publication_year": 2022, "sentences": ["the largest store of continually updating knowledge on our planet can be accessed via internet search .", "in this work we study giving access to this information to conversational agents .", "large language models , even though they store an impressive amount of knowledge within their weights , are known to hallucinate facts when generating dialogue ( shuster et al . 
, 2021 ) ; moreover , those facts are frozen in time at the point of model training .", "in contrast , we propose an approach that learns to generate an internet search query based on the context , and then conditions on the search results to finally generate a response , a method that can employ up - to - the - minute relevant information .", "we train and evaluate such models on a newly collected dataset of human - human conversations whereby one of the speakers is given access to internet search during knowledgedriven discussions in order to ground their responses .", "we find that search - query based access of the internet in conversation provides superior performance compared to existing approaches that either use no augmentation or faiss - based retrieval ( lewis et al . , 2020b ) ."], "events": [{"event_type": "RWF", "arguments": [{"text": "large language models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["large", "language", "models"], "offsets": [31, 32, 33]}, {"text": "hallucinate", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hallucinate"], "offsets": [51]}, {"text": "facts", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["facts"], "offsets": [52]}], "trigger": {"text": "hallucinate", "tokens": ["hallucinate"], "offsets": [51]}}, {"event_type": "RWF", "arguments": [{"text": "those facts", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["those", "facts"], "offsets": [67, 68]}, {"text": "frozen", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["frozen"], "offsets": [70]}], "trigger": {"text": "frozen", "tokens": ["frozen"], "offsets": [70]}}, {"event_type": "MDS", "arguments": [{"text": "internet search query", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["internet", "search", "query"], "offsets": [92, 93, 94]}, {"text": "context", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["context"], "offsets": [98]}], "trigger": {"text": "generate", 
"tokens": ["generate"], "offsets": [90]}}, {"event_type": "MDS", "arguments": [{"text": "search results", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["search", "results"], "offsets": [105, 106]}, {"text": "response", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["response"], "offsets": [111]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [109]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [128]}, {"text": "models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["models"], "offsets": [133]}, {"text": "ground", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["ground"], "offsets": [161]}, {"text": "collected dataset of human - human conversations", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["collected", "dataset", "of", "human", "-", "human", "conversations"], "offsets": [137, 138, 139, 140, 141, 142, 143]}], "trigger": {"text": "train and evaluate", "tokens": ["train", "and", "evaluate"], "offsets": [129, 130, 131]}}, {"event_type": "PUR", "arguments": [{"text": "responses", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["responses"], "offsets": [163]}], "trigger": {"text": "ground", "tokens": ["ground"], "offsets": [161]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [165]}, {"text": "provides", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["provides"], "offsets": [178]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [166]}}, {"event_type": "CMP", "arguments": [{"text": "existing approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "approaches"], "offsets": [183, 184]}, {"text": "search - query", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["search", "-", "query"], "offsets": [168, 169, 170]}, {"text": 
"based access of the internet in conversation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "access", "of", "the", "internet", "in", "conversation"], "offsets": [171, 172, 173, 174, 175, 176, 177]}, {"text": "superior", "nugget_type": "STR", "argument_type": "Result", "tokens": ["superior"], "offsets": [179]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [180]}, {"text": "that either use no augmentation or faiss - based retrieval", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["that", "either", "use", "no", "augmentation", "or", "faiss", "-", "based", "retrieval"], "offsets": [185, 186, 187, 188, 189, 190, 191, 192, 193, 194]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [178]}}, {"event_type": "MDS", "arguments": [{"text": "continually updating knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["continually", "updating", "knowledge"], "offsets": [4, 5, 6]}, {"text": "conversational agents", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["conversational", "agents"], "offsets": [28, 29]}], "trigger": {"text": "giving access to", "tokens": ["giving", "access", "to"], "offsets": [22, 23, 24]}}, {"event_type": "RWS", "arguments": [{"text": "large language models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["large", "language", "models"], "offsets": [31, 32, 33]}, {"text": "impressive amount of knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["impressive", "amount", "of", "knowledge"], "offsets": [40, 41, 42, 43]}, {"text": "their weights", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["knowledge", "weights"], "offsets": [43, 46]}], "trigger": {"text": "store", "tokens": ["store"], "offsets": [38]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], 
"offsets": [83]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [86]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [84]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [83]}, {"text": "employ", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["employ"], "offsets": [117]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [114]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [84]}}, {"event_type": "PUR", "arguments": [{"text": "up - to - the - minute relevant information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["up", "-", "to", "-", "the", "-", "minute", "relevant", "information"], "offsets": [118, 119, 120, 121, 122, 123, 124, 125, 126]}], "trigger": {"text": "employ", "tokens": ["employ"], "offsets": [117]}}], "document": ["the", "largest", "store", "of", "continually", "updating", "knowledge", "on", "our", "planet", "can", "be", "accessed", "via", "internet", "search", ".", "in", "this", "work", "we", "study", "giving", "access", "to", "this", "information", "to", "conversational", "agents", ".", "large", "language", "models", ",", "even", "though", "they", "store", "an", "impressive", "amount", "of", "knowledge", "within", "their", "weights", ",", "are", "known", "to", "hallucinate", "facts", "when", "generating", "dialogue", "(", "shuster", "et", "al", ".", ",", "2021", ")", ";", "moreover", ",", "those", "facts", "are", "frozen", "in", "time", "at", "the", "point", "of", "model", "training", ".", "in", "contrast", ",", "we", "propose", "an", "approach", "that", "learns", "to", "generate", "an", "internet", "search", "query", "based", "on", "the", "context", ",", "and", "then", "conditions", "on", "the", "search", "results", "to", "finally", "generate", "a", "response", ",", "a", "method", 
"that", "can", "employ", "up", "-", "to", "-", "the", "-", "minute", "relevant", "information", ".", "we", "train", "and", "evaluate", "such", "models", "on", "a", "newly", "collected", "dataset", "of", "human", "-", "human", "conversations", "whereby", "one", "of", "the", "speakers", "is", "given", "access", "to", "internet", "search", "during", "knowledgedriven", "discussions", "in", "order", "to", "ground", "their", "responses", ".", "we", "find", "that", "search", "-", "query", "based", "access", "of", "the", "internet", "in", "conversation", "provides", "superior", "performance", "compared", "to", "existing", "approaches", "that", "either", "use", "no", "augmentation", "or", "faiss", "-", "based", "retrieval", "(", "lewis", "et", "al", ".", ",", "2020b", ")", "."]}, {"venue": "ACL", "title": "Surprisal Estimators for Human Reading Times Need Character Models", "abstract": "While the use of character models has been popular in NLP applications, it has not been explored much in the context of psycholinguistic modeling. This paper presents a character model that can be applied to a structural parser-based processing model to calculate word generation probabilities. Experimental results show that surprisal estimates from a structural processing model using this character model deliver substantially better fits to self-paced reading, eye-tracking, and fMRI data than those from large-scale language models trained on much more data. 
This may suggest that the proposed processing model provides a more humanlike account of sentence processing, which assumes a larger role of morphology, phonotactics, and orthographic complexity than was previously thought.", "doc_id": "e748d087cbf4b8adddc1fd277ae245d6", "publication_year": 2021, "sentences": ["while the use of character models has been popular in nlp applications , it has not been explored much in the context of psycholinguistic modeling .", "this paper presents a character model that can be applied to a structural parser - based processing model to calculate word generation probabilities .", "experimental results show that surprisal estimates from a structural processing model using this character model deliver substantially better fits to self - paced reading , eye - tracking , and fmri data than those from large - scale language models trained on much more data .", "this may suggest that the proposed processing model provides a more humanlike account of sentence processing , which assumes a larger role of morphology , phonotactics , and orthographic complexity than was previously thought ."], "events": [{"event_type": "ITT", "arguments": [{"text": "character models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["character", "models"], "offsets": [4, 5]}, {"text": "in nlp applications", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nlp", "applications"], "offsets": [9, 10, 11]}], "trigger": {"text": "popular", "tokens": ["popular"], "offsets": [8]}}, {"event_type": "PRP", "arguments": [{"text": "character model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["character", "model"], "offsets": [30, 31]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [28]}}, {"event_type": "MDS", "arguments": [{"text": "character model", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["character", "model"], "offsets": [30, 31]}, {"text": "structural parser - based 
processing model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["structural", "parser", "-", "based", "processing", "model"], "offsets": [38, 39, 40, 41, 42, 43]}, {"text": "calculate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["calculate"], "offsets": [45]}], "trigger": {"text": "applied", "tokens": ["applied"], "offsets": [35]}}, {"event_type": "PUR", "arguments": [{"text": "word generation probabilities", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["word", "generation", "probabilities"], "offsets": [46, 47, 48]}], "trigger": {"text": "calculate", "tokens": ["calculate"], "offsets": [45]}}, {"event_type": "FIN", "arguments": [{"text": "deliver", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["deliver"], "offsets": [65]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [52]}}, {"event_type": "CMP", "arguments": [{"text": "substantially", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["substantially"], "offsets": [66]}, {"text": "better fits", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "fits"], "offsets": [67, 68]}, {"text": "self - paced reading", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["self", "-", "paced", "reading"], "offsets": [70, 71, 72, 73]}, {"text": "eye - tracking", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["eye", "-", "tracking"], "offsets": [75, 76, 77]}, {"text": "fmri data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["fmri", "data"], "offsets": [80, 81]}, {"text": "surprisal estimates", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["surprisal", "estimates"], "offsets": [54, 55]}, {"text": "from a structural processing model using this character model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "a", "structural", "processing", "model", "using", "this", "character", "model"], "offsets": [56, 57, 58, 59, 60, 61, 62, 63, 64]}, {"text": 
"those", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["surprisal", "estimates"], "offsets": [54, 55]}, {"text": "from large - scale language models trained on much more data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "large", "-", "scale", "language", "models", "trained", "on", "much", "more", "data"], "offsets": [84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94]}], "trigger": {"text": "deliver", "tokens": ["deliver"], "offsets": [65]}}, {"event_type": "FIN", "arguments": [{"text": "provides", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["provides"], "offsets": [104]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [98]}}, {"event_type": "FAC", "arguments": [{"text": "character model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["character", "model"], "offsets": [30, 31]}, {"text": "more", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["more"], "offsets": [106]}, {"text": "humanlike account of sentence processing", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["humanlike", "account", "of", "sentence", "processing"], "offsets": [107, 108, 109, 110, 111]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [104]}}, {"event_type": "FIN", "arguments": [{"text": "assumes", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["assumes"], "offsets": [114]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [98]}}, {"event_type": "FAC", "arguments": [{"text": "character model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["character", "model"], "offsets": [30, 31]}, {"text": "larger role of morphology , phonotactics", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["larger", "role", "of", "morphology", ",", "phonotactics"], "offsets": [116, 117, 118, 119, 120, 121]}, {"text": "larger role of orthographic complexity", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["larger", 
"role", "of", "orthographic", "complexity"], "offsets": [116, 117, 118, 124, 125]}], "trigger": {"text": "assumes", "tokens": ["assumes"], "offsets": [114]}}], "document": ["while", "the", "use", "of", "character", "models", "has", "been", "popular", "in", "nlp", "applications", ",", "it", "has", "not", "been", "explored", "much", "in", "the", "context", "of", "psycholinguistic", "modeling", ".", "this", "paper", "presents", "a", "character", "model", "that", "can", "be", "applied", "to", "a", "structural", "parser", "-", "based", "processing", "model", "to", "calculate", "word", "generation", "probabilities", ".", "experimental", "results", "show", "that", "surprisal", "estimates", "from", "a", "structural", "processing", "model", "using", "this", "character", "model", "deliver", "substantially", "better", "fits", "to", "self", "-", "paced", "reading", ",", "eye", "-", "tracking", ",", "and", "fmri", "data", "than", "those", "from", "large", "-", "scale", "language", "models", "trained", "on", "much", "more", "data", ".", "this", "may", "suggest", "that", "the", "proposed", "processing", "model", "provides", "a", "more", "humanlike", "account", "of", "sentence", "processing", ",", "which", "assumes", "a", "larger", "role", "of", "morphology", ",", "phonotactics", ",", "and", "orthographic", "complexity", "than", "was", "previously", "thought", "."]}, {"venue": "ACL", "title": "Learning Faithful Representations of Causal Graphs", "abstract": "Learning contextual text embeddings that represent causal graphs has been useful in improving the performance of downstream tasks like causal treatment effect estimation. However, existing causal embeddings which are trained to predict direct causal links, fail to capture other indirect causal links of the graph, thus leading to spurious correlations in downstream tasks. In this paper, we define the faithfulness property of contextual embeddings to capture geometric distance-based properties of directed acyclic causal graphs. 
By incorporating these faithfulness properties, we learn text embeddings that are 31.3% more faithful to human validated causal graphs with about 800K and 200K causal links and achieve 21.1% better Precision-Recall AUC in a link prediction fine-tuning task. Further, in a crowdsourced causal question-answering task on Yahoo! Answers with questions of the form \u201cWhat causes X?\u201d, our faithful embeddings achieved a precision of the first ranked answer (P@1) of 41.07%, outperforming the existing baseline by 10.2%.", "doc_id": "caf410b22c80a5106b51799a4f9ceccd", "publication_year": 2021, "sentences": ["learning contextual text embeddings that represent causal graphs has been useful in improving the performance of downstream tasks like causal treatment effect estimation .", "however , existing causal embeddings which are trained to predict direct causal links , fail to capture other indirect causal links of the graph , thus leading to spurious correlations in downstream tasks .", "in this paper , we define the faithfulness property of contextual embeddings to capture geometric distance - based properties of directed acyclic causal graphs .", "by incorporating these faithfulness properties , we learn text embeddings that are 31 . 3 % more faithful to human validated causal graphs with about 800k and 200k causal links and achieve 21 . 1 % better precision - recall auc in a link prediction fine - tuning task .", "further , in a crowdsourced causal question - answering task on yahoo !", "answers with questions of the form \u201c what causes x ? \u201d , our faithful embeddings achieved a precision of the first ranked answer ( p @ 1 ) of 41 . 07 % , outperforming the existing baseline by 10 . 
2 % ."], "events": [{"event_type": "ITT", "arguments": [{"text": "contextual text embeddings that represent causal graphs", "nugget_type": "APP", "argument_type": "Target", "tokens": ["contextual", "text", "embeddings", "that", "represent", "causal", "graphs"], "offsets": [1, 2, 3, 4, 5, 6, 7]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [0]}}, {"event_type": "RWF", "arguments": [{"text": "existing causal embeddings", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "causal", "embeddings"], "offsets": [26, 27, 28]}, {"text": "fail to capture", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["fail", "to", "capture"], "offsets": [38, 39, 40]}], "trigger": {"text": "fail to capture", "tokens": ["fail", "to", "capture"], "offsets": [38, 39, 40]}}, {"event_type": "RWF", "arguments": [{"text": "in downstream tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "downstream", "tasks"], "offsets": [54, 55, 56]}, {"text": "spurious correlations", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["spurious", "correlations"], "offsets": [52, 53]}], "trigger": {"text": "leading", "tokens": ["leading"], "offsets": [50]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [62]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [71]}, {"text": "faithfulness property of contextual embeddings", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["faithfulness", "property", "of", "contextual", "embeddings"], "offsets": [65, 66, 67, 68, 69]}], "trigger": {"text": "define", "tokens": ["define"], "offsets": [63]}}, {"event_type": "PUR", "arguments": [{"text": "geometric distance - based properties of directed acyclic causal graphs", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["geometric", "distance", "-", "based", "properties", "of", 
"directed", "acyclic", "causal", "graphs"], "offsets": [72, 73, 74, 75, 76, 77, 78, 79, 80, 81]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [71]}}, {"event_type": "MDS", "arguments": [{"text": "faithfulness property of contextual embeddings", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["faithfulness", "property", "of", "contextual", "embeddings"], "offsets": [65, 66, 67, 68, 69]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [90]}], "trigger": {"text": "incorporating", "tokens": ["incorporating"], "offsets": [84]}}, {"event_type": "PUR", "arguments": [{"text": "text embeddings", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["text", "embeddings"], "offsets": [91, 92]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [90]}}, {"event_type": "CMP", "arguments": [{"text": "21 . 1 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["21", ".", "1", "%"], "offsets": [115, 116, 117, 118]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [119]}, {"text": "precision - recall auc", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["precision", "-", "recall", "auc"], "offsets": [120, 121, 122, 123]}, {"text": "in a link prediction fine - tuning task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "link", "prediction", "fine", "-", "tuning", "task"], "offsets": [124, 125, 126, 127, 128, 129, 130, 131]}, {"text": "text embeddings", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["text", "embeddings"], "offsets": [91, 92]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "answers with questions of the form \u201c what causes x ? 
\u201d", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["answers", "with", "questions", "of", "the", "form", "\u201c", "what", "causes", "x", "?", "\u201d"], "offsets": [146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157]}, {"text": "faithful embeddings", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["faithful", "embeddings"], "offsets": [160, 161]}, {"text": "precision", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["precision"], "offsets": [164]}, {"text": "first ranked", "nugget_type": "STR", "argument_type": "Result", "tokens": ["first", "ranked"], "offsets": [167, 168]}, {"text": "41 . 07 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["41", ".", "07", "%"], "offsets": [176, 177, 178, 179]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [162]}}, {"event_type": "CMP", "arguments": [{"text": "answers with questions of the form \u201c what causes x ? \u201d", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["answers", "with", "questions", "of", "the", "form", "\u201c", "what", "causes", "x", "?", "\u201d"], "offsets": [146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157]}, {"text": "faithful embeddings", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["faithful", "embeddings"], "offsets": [160, 161]}, {"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [181]}, {"text": "existing baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "baseline"], "offsets": [183, 184]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [181]}}], "document": ["learning", "contextual", "text", "embeddings", "that", "represent", "causal", "graphs", "has", "been", "useful", "in", "improving", "the", "performance", "of", "downstream", "tasks", "like", "causal", "treatment", "effect", "estimation", ".", "however", ",", "existing", "causal", "embeddings", 
"which", "are", "trained", "to", "predict", "direct", "causal", "links", ",", "fail", "to", "capture", "other", "indirect", "causal", "links", "of", "the", "graph", ",", "thus", "leading", "to", "spurious", "correlations", "in", "downstream", "tasks", ".", "in", "this", "paper", ",", "we", "define", "the", "faithfulness", "property", "of", "contextual", "embeddings", "to", "capture", "geometric", "distance", "-", "based", "properties", "of", "directed", "acyclic", "causal", "graphs", ".", "by", "incorporating", "these", "faithfulness", "properties", ",", "we", "learn", "text", "embeddings", "that", "are", "31", ".", "3", "%", "more", "faithful", "to", "human", "validated", "causal", "graphs", "with", "about", "800k", "and", "200k", "causal", "links", "and", "achieve", "21", ".", "1", "%", "better", "precision", "-", "recall", "auc", "in", "a", "link", "prediction", "fine", "-", "tuning", "task", ".", "further", ",", "in", "a", "crowdsourced", "causal", "question", "-", "answering", "task", "on", "yahoo", "!", "answers", "with", "questions", "of", "the", "form", "\u201c", "what", "causes", "x", "?", "\u201d", ",", "our", "faithful", "embeddings", "achieved", "a", "precision", "of", "the", "first", "ranked", "answer", "(", "p", "@", "1", ")", "of", "41", ".", "07", "%", ",", "outperforming", "the", "existing", "baseline", "by", "10", ".", "2", "%", "."]}, {"venue": "ACL", "title": "BERTGen: Multi-task Generation through BERT", "abstract": "We present BERTGen, a novel, generative, decoder-only model which extends BERT by fusing multimodal and multilingual pre-trained models VL-BERT and M-BERT, respectively. BERTGen is auto-regressively trained for language generation tasks, namely image captioning, machine translation and multimodal machine translation, under a multi-task setting. With a comprehensive set of evaluations, we show that BERTGen outperforms many strong baselines across the tasks explored. 
We also show BERTGen\u2019s ability for zero-shot language generation, where it exhibits competitive performance to supervised counterparts. Finally, we conduct ablation studies which demonstrate that BERTGen substantially benefits from multi-tasking and effectively transfers relevant inductive biases from the pre-trained models.", "doc_id": "c4d248e5dc93410f280738c6c1f6a059", "publication_year": 2021, "sentences": ["we present", "bertgen , a novel , generative , decoder - only model which extends bert by fusing multimodal and multilingual pre - trained models vl - bert and m - bert , respectively .", "bertgen is auto - regressively trained for language generation tasks , namely image captioning , machine translation and multimodal machine translation , under a multi - task setting .", "with a comprehensive set of evaluations , we show that bertgen outperforms many strong baselines across the tasks explored .", "we also show bertgen \u2019 s ability for zero - shot language generation , where it exhibits competitive performance to supervised counterparts .", "finally , we conduct ablation studies which demonstrate that bertgen substantially benefits from multi - tasking and effectively transfers relevant inductive biases from the pre - trained models ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "bertgen", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bertgen"], "offsets": [2]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [71]}, {"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [75]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [72]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", 
"nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [75]}, {"text": "many strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["many", "strong", "baselines"], "offsets": [76, 77, 78]}, {"text": "across the tasks explored", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "the", "tasks", "explored"], "offsets": [79, 80, 81, 82]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [75]}}, {"event_type": "FAC", "arguments": [{"text": "generative , decoder - only model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["generative", ",", "decoder", "-", "only", "model"], "offsets": [7, 8, 9, 10, 11, 12]}, {"text": "zero - shot language generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["zero", "-", "shot", "language", "generation"], "offsets": [92, 93, 94, 95, 96]}, {"text": "bertgen \u2019 s ability", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["bertgen", "\u2019", "s", "ability"], "offsets": [87, 88, 89, 90]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [86]}}, {"event_type": "CMP", "arguments": [{"text": "supervised counterparts", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["supervised", "counterparts"], "offsets": [104, 105]}, {"text": "competitive performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["competitive", "performance"], "offsets": [101, 102]}, {"text": "bertgen", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["bertgen"], "offsets": [87]}], "trigger": {"text": "exhibits", "tokens": ["exhibits"], "offsets": [100]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [109]}, {"text": "ablation studies", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["ablation", "studies"], "offsets": [111, 112]}], "trigger": {"text": "conduct", "tokens": 
["conduct"], "offsets": [110]}}, {"event_type": "FIN", "arguments": [{"text": "benefits", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["benefits"], "offsets": [118]}, {"text": "effectively transfers", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effectively", "transfers"], "offsets": [124, 125]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [114]}}, {"event_type": "FAC", "arguments": [{"text": "multi - tasking", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["multi", "-", "tasking"], "offsets": [120, 121, 122]}, {"text": "bertgen", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["bertgen"], "offsets": [116]}], "trigger": {"text": "benefits", "tokens": ["benefits"], "offsets": [118]}}, {"event_type": "FAC", "arguments": [{"text": "bertgen", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["bertgen"], "offsets": [116]}, {"text": "relevant inductive biases", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["relevant", "inductive", "biases"], "offsets": [126, 127, 128]}], "trigger": {"text": "effectively transfers", "tokens": ["effectively", "transfers"], "offsets": [124, 125]}}, {"event_type": "WKS", "arguments": [{"text": "vl - bert", "nugget_type": "APP", "argument_type": "Content", "tokens": ["vl", "-", "bert"], "offsets": [25, 26, 27]}, {"text": "m - bert", "nugget_type": "APP", "argument_type": "Content", "tokens": ["m", "-", "bert"], "offsets": [29, 30, 31]}, {"text": "extends", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["extends"], "offsets": [14]}], "trigger": {"text": "fusing", "tokens": ["fusing"], "offsets": [17]}}, {"event_type": "PUR", "arguments": [{"text": "bert", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["bert"], "offsets": [15]}], "trigger": {"text": "extends", "tokens": ["extends"], "offsets": [14]}}, {"event_type": "WKS", "arguments": [{"text": "bertgen", "nugget_type": "APP", "argument_type": 
"Content", "tokens": ["bertgen"], "offsets": [35]}, {"text": "language generation tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "generation", "tasks"], "offsets": [42, 43, 44]}, {"text": "under a multi - task setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "a", "multi", "-", "task", "setting"], "offsets": [57, 58, 59, 60, 61, 62]}], "trigger": {"text": "auto - regressively trained", "tokens": ["auto", "-", "regressively", "trained"], "offsets": [37, 38, 39, 40]}}], "document": ["we", "present", "bertgen", ",", "a", "novel", ",", "generative", ",", "decoder", "-", "only", "model", "which", "extends", "bert", "by", "fusing", "multimodal", "and", "multilingual", "pre", "-", "trained", "models", "vl", "-", "bert", "and", "m", "-", "bert", ",", "respectively", ".", "bertgen", "is", "auto", "-", "regressively", "trained", "for", "language", "generation", "tasks", ",", "namely", "image", "captioning", ",", "machine", "translation", "and", "multimodal", "machine", "translation", ",", "under", "a", "multi", "-", "task", "setting", ".", "with", "a", "comprehensive", "set", "of", "evaluations", ",", "we", "show", "that", "bertgen", "outperforms", "many", "strong", "baselines", "across", "the", "tasks", "explored", ".", "we", "also", "show", "bertgen", "\u2019", "s", "ability", "for", "zero", "-", "shot", "language", "generation", ",", "where", "it", "exhibits", "competitive", "performance", "to", "supervised", "counterparts", ".", "finally", ",", "we", "conduct", "ablation", "studies", "which", "demonstrate", "that", "bertgen", "substantially", "benefits", "from", "multi", "-", "tasking", "and", "effectively", "transfers", "relevant", "inductive", "biases", "from", "the", "pre", "-", "trained", "models", "."]}, {"venue": "ACL", "title": "Answering while Summarizing: Multi-task Learning for Multi-hop QA with Evidence Extraction", "abstract": "Question answering (QA) using textual sources for purposes such as 
reading comprehension (RC) has attracted much attention. This study focuses on the task of explainable multi-hop QA, which requires the system to return the answer with evidence sentences by reasoning and gathering disjoint pieces of the reference texts. It proposes the Query Focused Extractor (QFE) model for evidence extraction and uses multi-task learning with the QA model. QFE is inspired by extractive summarization models; compared with the existing method, which extracts each evidence sentence independently, it sequentially extracts evidence sentences by using an RNN with an attention mechanism on the question sentence. It enables QFE to consider the dependency among the evidence sentences and cover important information in the question sentence. Experimental results show that QFE with a simple RC baseline model achieves a state-of-the-art evidence extraction score on HotpotQA. Although designed for RC, it also achieves a state-of-the-art evidence extraction score on FEVER, which is a recognizing textual entailment task on a large textual database.", "doc_id": "3834ab36dd907dfa70309c45a0bed9c8", "publication_year": 2019, "sentences": ["question answering ( qa ) using textual sources for purposes such as reading comprehension ( rc ) has attracted much attention .", "this study focuses on the task of explainable multi - hop qa , which requires the system to return the answer with evidence sentences by reasoning and gathering disjoint pieces of the reference texts .", "it proposes the query focused extractor ( qfe ) model for evidence extraction and uses multi - task learning with the qa model .", "qfe is inspired by extractive summarization models ; compared with the existing method , which extracts each evidence sentence independently , it sequentially extracts evidence sentences by using an rnn with an attention mechanism on the question sentence .", "it enables qfe to consider the dependency among the evidence sentences and cover important information in the 
question sentence .", "experimental results show that qfe with a simple rc baseline model achieves a state - of - the - art evidence extraction score on hotpotqa .", "although designed for rc , it also achieves a state - of - the - art evidence extraction score on fever , which is a recognizing textual entailment task on a large textual database ."], "events": [{"event_type": "FAC", "arguments": [{"text": "query focused extractor ( qfe ) model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["query", "focused", "extractor", "model"], "offsets": [60, 61, 62, 66]}, {"text": "state - of - the - art evidence extraction score", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "evidence", "extraction", "score"], "offsets": [153, 154, 155, 156, 157, 158, 159, 160, 161, 162]}, {"text": "hotpotqa", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["hotpotqa"], "offsets": [164]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [151]}}], "document": ["question", "answering", "(", "qa", ")", "using", "textual", "sources", "for", "purposes", "such", "as", "reading", "comprehension", "(", "rc", ")", "has", "attracted", "much", "attention", ".", "this", "study", "focuses", "on", "the", "task", "of", "explainable", "multi", "-", "hop", "qa", ",", "which", "requires", "the", "system", "to", "return", "the", "answer", "with", "evidence", "sentences", "by", "reasoning", "and", "gathering", "disjoint", "pieces", "of", "the", "reference", "texts", ".", "it", "proposes", "the", "query", "focused", "extractor", "(", "qfe", ")", "model", "for", "evidence", "extraction", "and", "uses", "multi", "-", "task", "learning", "with", "the", "qa", "model", ".", "qfe", "is", "inspired", "by", "extractive", "summarization", "models", ";", "compared", "with", "the", "existing", "method", ",", "which", "extracts", "each", "evidence", "sentence", "independently", ",", "it", "sequentially", "extracts", 
"evidence", "sentences", "by", "using", "an", "rnn", "with", "an", "attention", "mechanism", "on", "the", "question", "sentence", ".", "it", "enables", "qfe", "to", "consider", "the", "dependency", "among", "the", "evidence", "sentences", "and", "cover", "important", "information", "in", "the", "question", "sentence", ".", "experimental", "results", "show", "that", "qfe", "with", "a", "simple", "rc", "baseline", "model", "achieves", "a", "state", "-", "of", "-", "the", "-", "art", "evidence", "extraction", "score", "on", "hotpotqa", ".", "although", "designed", "for", "rc", ",", "it", "also", "achieves", "a", "state", "-", "of", "-", "the", "-", "art", "evidence", "extraction", "score", "on", "fever", ",", "which", "is", "a", "recognizing", "textual", "entailment", "task", "on", "a", "large", "textual", "database", "."]}, {"venue": "ACL", "title": "Pixie: Preference in Implicit and Explicit Comparisons", "abstract": "We present Pixie, a manually annotated dataset for preference classification comprising 8,890 sentences drawn from app reviews. Unlike previous studies on preference classification, Pixie contains implicit (omitting an entity being compared) and indirect (lacking comparative linguistic cues) comparisons. We find that transformer-based pretrained models, finetuned on Pixie, achieve a weighted average F1 score of 83.34% and outperform the existing state-of-the-art preference classification model (73.99%).", "doc_id": "78137eba2dc4fcc6eb440a009bbb1fa5", "publication_year": 2022, "sentences": ["we present pixie , a manually annotated dataset for preference classification comprising 8 , 890 sentences drawn from app reviews .", "unlike previous studies on preference classification , pixie contains implicit ( omitting an entity being compared ) and indirect ( lacking comparative linguistic cues ) comparisons .", "we find that transformer - based pretrained models , finetuned on pixie , achieve a weighted average f1 score of 83 . 
34 % and outperform the existing state - of - the - art preference classification model ( 73 . 99 % ) ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "pixie", "nugget_type": "DST", "argument_type": "Content", "tokens": ["pixie"], "offsets": [2]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [48]}, {"text": "achieve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieve"], "offsets": [61]}, {"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], "offsets": [73]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [49]}}, {"event_type": "CMP", "arguments": [{"text": "transformer - based pretrained models", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["transformer", "-", "based", "pretrained", "models"], "offsets": [51, 52, 53, 54, 55]}, {"text": "existing state - of - the - art preference classification model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "state", "-", "of", "-", "the", "-", "art", "preference", "classification", "model"], "offsets": [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [73]}}, {"event_type": "FAC", "arguments": [{"text": "transformer - based pretrained models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["transformer", "-", "based", "pretrained", "models"], "offsets": [51, 52, 53, 54, 55]}, {"text": "finetuned on pixie", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["finetuned", "on", "pixie"], "offsets": [57, 58, 59]}, {"text": "83 . 
34 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["83", ".", "34", "%"], "offsets": [68, 69, 70, 71]}, {"text": "weighted average f1 score", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["weighted", "average", "f1", "score"], "offsets": [63, 64, 65, 66]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [61]}}], "document": ["we", "present", "pixie", ",", "a", "manually", "annotated", "dataset", "for", "preference", "classification", "comprising", "8", ",", "890", "sentences", "drawn", "from", "app", "reviews", ".", "unlike", "previous", "studies", "on", "preference", "classification", ",", "pixie", "contains", "implicit", "(", "omitting", "an", "entity", "being", "compared", ")", "and", "indirect", "(", "lacking", "comparative", "linguistic", "cues", ")", "comparisons", ".", "we", "find", "that", "transformer", "-", "based", "pretrained", "models", ",", "finetuned", "on", "pixie", ",", "achieve", "a", "weighted", "average", "f1", "score", "of", "83", ".", "34", "%", "and", "outperform", "the", "existing", "state", "-", "of", "-", "the", "-", "art", "preference", "classification", "model", "(", "73", ".", "99", "%", ")", "."]}, {"venue": "ACL", "title": "Multi-Modal Sarcasm Detection via Cross-Modal Graph Convolutional Network", "abstract": "With the increasing popularity of posting multimodal messages online, many recent studies have been carried out utilizing both textual and visual information for multi-modal sarcasm detection. In this paper, we investigate multi-modal sarcasm detection from a novel perspective by constructing a cross-modal graph for each instance to explicitly draw the ironic relations between textual and visual modalities. Specifically, we first detect the objects paired with descriptions of the image modality, enabling the learning of important visual information. 
Then, the descriptions of the objects are served as a bridge to determine the importance of the association between the objects of image modality and the contextual words of text modality, so as to build a cross-modal graph for each multi-modal instance. Furthermore, we devise a cross-modal graph convolutional network to make sense of the incongruity relations between modalities for multi-modal sarcasm detection. Extensive experimental results and in-depth analysis show that our model achieves state-of-the-art performance in multi-modal sarcasm detection.", "doc_id": "7dc3ade2c3d5df39ab2a62aa1710d03b", "publication_year": 2022, "sentences": ["with the increasing popularity of posting multimodal messages online , many recent studies have been carried out utilizing both textual and visual information for multi - modal sarcasm detection .", "in this paper , we investigate multi - modal sarcasm detection from a novel perspective by constructing a cross - modal graph for each instance to explicitly draw the ironic relations between textual and visual modalities .", "specifically , we first detect the objects paired with descriptions of the image modality , enabling the learning of important visual information .", "then , the descriptions of the objects are served as a bridge to determine the importance of the association between the objects of image modality and the contextual words of text modality , so as to build a cross - modal graph for each multi - modal instance .", "furthermore , we devise a cross - modal graph convolutional network to make sense of the incongruity relations between modalities for multi - modal sarcasm detection .", "extensive experimental results and in - depth analysis show that our model achieves state - of - the - art performance in multi - modal sarcasm detection ."], "events": [{"event_type": "RWS", "arguments": [{"text": "textual and visual information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["textual", "and", 
"visual", "information"], "offsets": [19, 20, 21, 22]}, {"text": "multi - modal sarcasm detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multi", "-", "modal", "sarcasm", "detection"], "offsets": [24, 25, 26, 27, 28]}, {"text": "many recent studies", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["many", "recent", "studies"], "offsets": [10, 11, 12]}], "trigger": {"text": "utilizing", "tokens": ["utilizing"], "offsets": [17]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [34]}, {"text": "multi - modal sarcasm detection", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["multi", "-", "modal", "sarcasm", "detection"], "offsets": [36, 37, 38, 39, 40]}, {"text": "from a novel perspective", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "a", "novel", "perspective"], "offsets": [41, 42, 43, 44]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [35]}}, {"event_type": "MDS", "arguments": [{"text": "cross - modal graph", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["cross", "-", "modal", "graph"], "offsets": [48, 49, 50, 51]}, {"text": "each instance", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "instance"], "offsets": [53, 54]}, {"text": "draw", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["draw"], "offsets": [57]}], "trigger": {"text": "constructing", "tokens": ["constructing"], "offsets": [46]}}, {"event_type": "PUR", "arguments": [{"text": "ironic relations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["ironic", "relations"], "offsets": [59, 60]}, {"text": "between textual and visual modalities", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "textual", "and", "visual", "modalities"], "offsets": [61, 62, 63, 64, 65]}], "trigger": {"text": "draw", "tokens": ["draw"], "offsets": 
[57]}}, {"event_type": "MDS", "arguments": [{"text": "learning", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learning"], "offsets": [84]}, {"text": "objects paired", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["objects", "paired"], "offsets": [73, 74]}, {"text": "image modality", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["image", "modality"], "offsets": [79, 80]}], "trigger": {"text": "detect", "tokens": ["detect"], "offsets": [71]}}, {"event_type": "PUR", "arguments": [{"text": "important visual information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["important", "visual", "information"], "offsets": [86, 87, 88]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [84]}}, {"event_type": "MDS", "arguments": [{"text": "descriptions of the objects", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["descriptions", "of", "the", "objects"], "offsets": [93, 94, 95, 96]}, {"text": "importance of the association", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["importance", "of", "the", "association"], "offsets": [105, 106, 107, 108]}, {"text": "objects of image modality", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["objects", "of", "image", "modality"], "offsets": [111, 112, 113, 114]}, {"text": "contextual words of text modality", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["contextual", "words", "of", "text", "modality"], "offsets": [117, 118, 119, 120, 121]}], "trigger": {"text": "determine", "tokens": ["determine"], "offsets": [103]}}, {"event_type": "MDS", "arguments": [{"text": "cross - modal graph", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["cross", "-", "modal", "graph"], "offsets": [128, 129, 130, 131]}, {"text": "each multi - modal instance", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "multi", "-", "modal", "instance"], 
"offsets": [133, 134, 135, 136, 137]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [126]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [141]}, {"text": "cross - modal graph convolutional network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["cross", "-", "modal", "graph", "convolutional", "network"], "offsets": [144, 145, 146, 147, 148, 149]}, {"text": "make sense of", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["make", "sense", "of"], "offsets": [151, 152, 153]}], "trigger": {"text": "devise", "tokens": ["devise"], "offsets": [142]}}, {"event_type": "PUR", "arguments": [{"text": "incongruity relations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["incongruity", "relations"], "offsets": [155, 156]}, {"text": "between modalities for multi - modal sarcasm detection", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "modalities", "for", "multi", "-", "modal", "sarcasm", "detection"], "offsets": [157, 158, 159, 160, 161, 162, 163, 164]}], "trigger": {"text": "make sense of", "tokens": ["make", "sense", "of"], "offsets": [151, 152, 153]}}, {"event_type": "FAC", "arguments": [{"text": "multi - modal sarcasm detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multi", "-", "modal", "sarcasm", "detection"], "offsets": [188, 189, 190, 191, 192]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [179, 180, 181, 182, 183, 184, 185, 186]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [178]}}], "document": ["with", "the", "increasing", "popularity", "of", "posting", "multimodal", "messages", "online", ",", "many", "recent", "studies", "have", "been", "carried", "out", "utilizing", "both", "textual", "and", "visual", "information", 
"for", "multi", "-", "modal", "sarcasm", "detection", ".", "in", "this", "paper", ",", "we", "investigate", "multi", "-", "modal", "sarcasm", "detection", "from", "a", "novel", "perspective", "by", "constructing", "a", "cross", "-", "modal", "graph", "for", "each", "instance", "to", "explicitly", "draw", "the", "ironic", "relations", "between", "textual", "and", "visual", "modalities", ".", "specifically", ",", "we", "first", "detect", "the", "objects", "paired", "with", "descriptions", "of", "the", "image", "modality", ",", "enabling", "the", "learning", "of", "important", "visual", "information", ".", "then", ",", "the", "descriptions", "of", "the", "objects", "are", "served", "as", "a", "bridge", "to", "determine", "the", "importance", "of", "the", "association", "between", "the", "objects", "of", "image", "modality", "and", "the", "contextual", "words", "of", "text", "modality", ",", "so", "as", "to", "build", "a", "cross", "-", "modal", "graph", "for", "each", "multi", "-", "modal", "instance", ".", "furthermore", ",", "we", "devise", "a", "cross", "-", "modal", "graph", "convolutional", "network", "to", "make", "sense", "of", "the", "incongruity", "relations", "between", "modalities", "for", "multi", "-", "modal", "sarcasm", "detection", ".", "extensive", "experimental", "results", "and", "in", "-", "depth", "analysis", "show", "that", "our", "model", "achieves", "state", "-", "of", "-", "the", "-", "art", "performance", "in", "multi", "-", "modal", "sarcasm", "detection", "."]}, {"venue": "ACL", "title": "Quantified Reproducibility Assessment of NLP Results", "abstract": "This paper describes and tests a method for carrying out quantified reproducibility assessment (QRA) that is based on concepts and definitions from metrology. QRA produces a single score estimating the degree of reproducibility of a given system and evaluation measure, on the basis of the scores from, and differences between, different reproductions. 
We test QRA on 18 different system and evaluation measure combinations (involving diverse NLP tasks and types of evaluation), for each of which we have the original results and one to seven reproduction results. The proposed QRA method produces degree-of-reproducibility scores that are comparable across multiple reproductions not only of the same, but also of different, original studies. We find that the proposed method facilitates insights into causes of variation between reproductions, and as a result, allows conclusions to be drawn about what aspects of system and/or evaluation design need to be changed in order to improve reproducibility.", "doc_id": "1fb5f57cce014f261b22bc7f6e070c1f", "publication_year": 2022, "sentences": ["this paper describes and tests a method for carrying out quantified reproducibility assessment ( qra ) that is based on concepts and definitions from metrology .", "qra produces a single score estimating the degree of reproducibility of a given system and evaluation measure , on the basis of the scores from , and differences between , different reproductions .", "we test qra on 18 different system and evaluation measure combinations ( involving diverse nlp tasks and types of evaluation ) , for each of which we have the original results and one to seven reproduction results .", "the proposed qra method produces degree - of - reproducibility scores that are comparable across multiple reproductions not only of the same , but also of different , original studies .", "we find that the proposed method facilitates insights into causes of variation between reproductions , and as a result , allows conclusions to be drawn about what aspects of system and / or evaluation design need to be changed in order to improve reproducibility ."], "events": [{"event_type": "WKS", "arguments": [{"text": "quantified reproducibility assessment", "nugget_type": "APP", "argument_type": "Content", "tokens": ["quantified", "reproducibility", "assessment"], "offsets": 
[10, 11, 12]}, {"text": "on concepts from metrology", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "concepts", "from", "metrology"], "offsets": [19, 20, 23, 24]}, {"text": "definitions from metrology", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["definitions", "from", "metrology"], "offsets": [22, 23, 24]}], "trigger": {"text": "describes and tests", "tokens": ["describes", "and", "tests"], "offsets": [2, 3, 4]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [59]}, {"text": "on 18 different system and evaluation measure combinations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "18", "different", "system", "and", "evaluation", "measure", "combinations"], "offsets": [62, 63, 64, 65, 66, 67, 68, 69]}, {"text": "qra", "nugget_type": "APP", "argument_type": "Content", "tokens": ["quantified", "reproducibility", "assessment"], "offsets": [10, 11, 12]}], "trigger": {"text": "test", "tokens": ["test"], "offsets": [60]}}, {"event_type": "FIN", "arguments": [{"text": "facilitates", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["facilitates"], "offsets": [134]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [129]}}, {"event_type": "FAC", "arguments": [{"text": "quantified reproducibility assessment", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["quantified", "reproducibility", "assessment"], "offsets": [10, 11, 12]}, {"text": "causes of variation between reproductions", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["causes", "of", "variation", "between", "reproductions"], "offsets": [137, 138, 139, 140, 141]}], "trigger": {"text": "facilitates", "tokens": ["facilitates"], "offsets": [134]}}, {"event_type": "MDS", "arguments": [{"text": "single score", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["single", "score"], "offsets": [29, 30]}, 
{"text": "estimating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["estimating"], "offsets": [31]}, {"text": "scores from different reproductions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["scores", "from", "different", "reproductions"], "offsets": [49, 50, 56, 57]}, {"text": "differences between different reproductions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["differences", "between", "different", "reproductions"], "offsets": [53, 54, 56, 57]}], "trigger": {"text": "produces", "tokens": ["produces"], "offsets": [27]}}, {"event_type": "PUR", "arguments": [{"text": "degree of reproducibility", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["degree", "of", "reproducibility"], "offsets": [33, 34, 35]}], "trigger": {"text": "estimating", "tokens": ["estimating"], "offsets": [31]}}, {"event_type": "FAC", "arguments": [{"text": "original results", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["original", "results"], "offsets": [88, 89]}, {"text": "one to seven reproduction results", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["one", "to", "seven", "reproduction", "results"], "offsets": [91, 92, 93, 94, 95]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [86]}}, {"event_type": "FAC", "arguments": [{"text": "across multiple reproductions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "multiple", "reproductions"], "offsets": [111, 112, 113]}, {"text": "degree - of - reproducibility scores", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["degree", "-", "of", "-", "reproducibility", "scores"], "offsets": [102, 103, 104, 105, 106, 107]}], "trigger": {"text": "comparable", "tokens": ["comparable"], "offsets": [110]}}], "document": ["this", "paper", "describes", "and", "tests", "a", "method", "for", "carrying", "out", "quantified", "reproducibility", "assessment", "(", "qra", ")", "that", "is", "based", "on", 
"concepts", "and", "definitions", "from", "metrology", ".", "qra", "produces", "a", "single", "score", "estimating", "the", "degree", "of", "reproducibility", "of", "a", "given", "system", "and", "evaluation", "measure", ",", "on", "the", "basis", "of", "the", "scores", "from", ",", "and", "differences", "between", ",", "different", "reproductions", ".", "we", "test", "qra", "on", "18", "different", "system", "and", "evaluation", "measure", "combinations", "(", "involving", "diverse", "nlp", "tasks", "and", "types", "of", "evaluation", ")", ",", "for", "each", "of", "which", "we", "have", "the", "original", "results", "and", "one", "to", "seven", "reproduction", "results", ".", "the", "proposed", "qra", "method", "produces", "degree", "-", "of", "-", "reproducibility", "scores", "that", "are", "comparable", "across", "multiple", "reproductions", "not", "only", "of", "the", "same", ",", "but", "also", "of", "different", ",", "original", "studies", ".", "we", "find", "that", "the", "proposed", "method", "facilitates", "insights", "into", "causes", "of", "variation", "between", "reproductions", ",", "and", "as", "a", "result", ",", "allows", "conclusions", "to", "be", "drawn", "about", "what", "aspects", "of", "system", "and", "/", "or", "evaluation", "design", "need", "to", "be", "changed", "in", "order", "to", "improve", "reproducibility", "."]}, {"venue": "ACL", "title": "On The Ingredients of an Effective Zero-shot Semantic Parser", "abstract": "Semantic parsers map natural language utterances into meaning representations (e.g., programs). Such models are typically bottlenecked by the paucity of training data due to the required laborious annotation efforts. Recent studies have performed zero-shot learning by synthesizing training examples of canonical utterances and programs from a grammar, and further paraphrasing these utterances to improve linguistic diversity. However, such synthetic examples cannot fully capture patterns in real data. 
In this paper we analyze zero-shot parsers through the lenses of the language and logical gaps (Herzig and Berant, 2019), which quantify the discrepancy of language and programmatic patterns between the canonical examples and real-world user-issued ones. We propose bridging these gaps using improved grammars, stronger paraphrasers, and efficient learning methods using canonical examples that most likely reflect real user intents. Our model achieves strong performance on two semantic parsing benchmarks (Scholar, Geo) with zero labeled data.", "doc_id": "77b314e609b2090f64d95ced967a43dc", "publication_year": 2022, "sentences": ["semantic parsers map natural language utterances into meaning representations ( e . g . , programs ) .", "such models are typically bottlenecked by the paucity of training data due to the required laborious annotation efforts .", "recent studies have performed zero - shot learning by synthesizing training examples of canonical utterances and programs from a grammar , and further paraphrasing these utterances to improve linguistic diversity .", "however , such synthetic examples cannot fully capture patterns in real data .", "in this paper we analyze zero - shot parsers through the lenses of the language and logical gaps ( herzig and berant , 2019 ) , which quantify the discrepancy of language and programmatic patterns between the canonical examples and real - world user - issued ones .", "we propose bridging these gaps using improved grammars , stronger paraphrasers , and efficient learning methods using canonical examples that most likely reflect real user intents .", "our model achieves strong performance on two semantic parsing benchmarks ( scholar , geo ) with zero labeled data ."], "events": [{"event_type": "ITT", "arguments": [{"text": "semantic parsers", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["semantic", "parsers"], "offsets": [0, 1]}], "trigger": {"text": "map", "tokens": ["map"], "offsets": [2]}}, {"event_type": 
"RWF", "arguments": [{"text": "bottlenecked", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["bottlenecked"], "offsets": [22]}, {"text": "paucity of training data", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["paucity", "of", "training", "data"], "offsets": [25, 26, 27, 28]}], "trigger": {"text": "bottlenecked", "tokens": ["bottlenecked"], "offsets": [22]}}, {"event_type": "RWS", "arguments": [{"text": "recent studies", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["recent", "studies"], "offsets": [37, 38]}, {"text": "performed", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["performed"], "offsets": [40]}, {"text": "from a grammar", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "a", "grammar"], "offsets": [54, 55, 56]}, {"text": "training examples of canonical utterances", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["training", "examples", "of", "canonical", "utterances"], "offsets": [47, 48, 49, 50, 51]}, {"text": "training examples of programs", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["training", "examples", "of", "programs"], "offsets": [47, 48, 49, 53]}], "trigger": {"text": "synthesizing", "tokens": ["synthesizing"], "offsets": [46]}}, {"event_type": "PUR", "arguments": [{"text": "zero - shot learning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["zero", "-", "shot", "learning"], "offsets": [41, 42, 43, 44]}], "trigger": {"text": "performed", "tokens": ["performed"], "offsets": [40]}}, {"event_type": "RWS", "arguments": [{"text": "utterances", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["utterances"], "offsets": [62]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [64]}], "trigger": {"text": "paraphrasing", "tokens": ["paraphrasing"], "offsets": [60]}}, {"event_type": "PUR", "arguments": [{"text": "linguistic diversity", 
"nugget_type": "TAK", "argument_type": "Aim", "tokens": ["linguistic", "diversity"], "offsets": [65, 66]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [64]}}, {"event_type": "RWF", "arguments": [{"text": "synthetic examples", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["synthetic", "examples"], "offsets": [71, 72]}, {"text": "cannot fully capture", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cannot", "fully", "capture"], "offsets": [73, 74, 75]}], "trigger": {"text": "cannot fully capture", "tokens": ["cannot", "fully", "capture"], "offsets": [73, 74, 75]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [84]}, {"text": "zero - shot parsers", "nugget_type": "APP", "argument_type": "Content", "tokens": ["zero", "-", "shot", "parsers"], "offsets": [86, 87, 88, 89]}, {"text": "quantify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["quantify"], "offsets": [108]}, {"text": "through the lenses of the", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "the", "lenses", "of", "the"], "offsets": [90, 91, 92, 93, 94]}, {"text": "lenses of the language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["lenses", "of", "the", "language"], "offsets": [92, 93, 94, 95]}, {"text": "lenses of the logical gaps", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["lenses", "of", "the", "logical", "gaps"], "offsets": [92, 93, 94, 97, 98]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [85]}}, {"event_type": "MDS", "arguments": [{"text": "bridging", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["bridging"], "offsets": [131]}, {"text": "improved grammars", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["improved", "grammars"], "offsets": [135, 136]}, {"text": "stronger paraphrasers", "nugget_type": "FEA", "argument_type": 
"TriedComponent", "tokens": ["stronger", "paraphrasers"], "offsets": [138, 139]}, {"text": "efficient learning methods", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["efficient", "learning", "methods"], "offsets": [142, 143, 144]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [134]}}, {"event_type": "PUR", "arguments": [{"text": "gaps", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["discrepancy", "of", "language", "and", "programmatic", "patterns", "between", "the", "canonical", "examples", "and", "real", "-", "world", "user", "-", "issued", "ones"], "offsets": [110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127]}], "trigger": {"text": "bridging", "tokens": ["bridging"], "offsets": [131]}}, {"event_type": "FAC", "arguments": [{"text": "on two semantic parsing benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "two", "semantic", "parsing", "benchmarks"], "offsets": [161, 162, 163, 164, 165]}, {"text": "with zero labeled data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "zero", "labeled", "data"], "offsets": [171, 172, 173, 174]}, {"text": "strong performance", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["strong", "performance"], "offsets": [159, 160]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [158]}}, {"event_type": "PUR", "arguments": [{"text": "discrepancy of language and programmatic patterns between the canonical examples and real - world user - issued ones", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["discrepancy", "of", "language", "and", "programmatic", "patterns", "between", "the", "canonical", "examples", "and", "real", "-", "world", "user", "-", "issued", "ones"], "offsets": [110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127]}], "trigger": {"text": "quantify", "tokens": ["quantify"], "offsets": [108]}}], "document": 
["semantic", "parsers", "map", "natural", "language", "utterances", "into", "meaning", "representations", "(", "e", ".", "g", ".", ",", "programs", ")", ".", "such", "models", "are", "typically", "bottlenecked", "by", "the", "paucity", "of", "training", "data", "due", "to", "the", "required", "laborious", "annotation", "efforts", ".", "recent", "studies", "have", "performed", "zero", "-", "shot", "learning", "by", "synthesizing", "training", "examples", "of", "canonical", "utterances", "and", "programs", "from", "a", "grammar", ",", "and", "further", "paraphrasing", "these", "utterances", "to", "improve", "linguistic", "diversity", ".", "however", ",", "such", "synthetic", "examples", "cannot", "fully", "capture", "patterns", "in", "real", "data", ".", "in", "this", "paper", "we", "analyze", "zero", "-", "shot", "parsers", "through", "the", "lenses", "of", "the", "language", "and", "logical", "gaps", "(", "herzig", "and", "berant", ",", "2019", ")", ",", "which", "quantify", "the", "discrepancy", "of", "language", "and", "programmatic", "patterns", "between", "the", "canonical", "examples", "and", "real", "-", "world", "user", "-", "issued", "ones", ".", "we", "propose", "bridging", "these", "gaps", "using", "improved", "grammars", ",", "stronger", "paraphrasers", ",", "and", "efficient", "learning", "methods", "using", "canonical", "examples", "that", "most", "likely", "reflect", "real", "user", "intents", ".", "our", "model", "achieves", "strong", "performance", "on", "two", "semantic", "parsing", "benchmarks", "(", "scholar", ",", "geo", ")", "with", "zero", "labeled", "data", "."]}, {"venue": "ACL", "title": "Misleading Failures of Partial-input Baselines", "abstract": "Recent work establishes dataset difficulty and removes annotation artifacts via partial-input baselines (e.g., hypothesis-only model for SNLI or question-only model for VQA). A successful partial-input baseline indicates that the dataset is cheatable. 
But the converse is not necessarily true: failures of partial-input baselines do not mean the dataset is free of artifacts. We first design artificial datasets to illustrate how the trivial patterns that are only visible in the full input can evade any partial-input baseline. Next, we identify such artifacts in the SNLI dataset\u2014a hypothesis-only model augmented with trivial patterns in the premise can solve 15% of previously-thought \u201chard\u201d examples. Our work provides a caveat for the use and creation of partial-input baselines for datasets.", "doc_id": "67f870c77d2e9a4f0e0c6d9834afe3f4", "publication_year": 2019, "sentences": ["recent work establishes dataset difficulty and removes annotation artifacts via partial - input baselines ( e . g . , hypothesis - only model for snli or question - only model for vqa ) .", "a successful partial - input baseline indicates that the dataset is cheatable .", "but the converse is not necessarily true : failures of partial - input baselines do not mean the dataset is free of artifacts .", "we first design artificial datasets to illustrate how the trivial patterns that are only visible in the full input can evade any partial - input baseline .", "next , we identify such artifacts in the snli dataset \u2014 a hypothesis - only model augmented with trivial patterns in the premise can solve 15 % of previously - thought \u201c hard \u201d examples .", "our work provides a caveat for the use and creation of partial - input baselines for datasets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "partial - input baselines", "nugget_type": "APP", "argument_type": "Target", "tokens": ["partial", "-", "input", "baselines"], "offsets": [10, 11, 12, 13]}], "trigger": {"text": "establishes", "tokens": ["establishes"], "offsets": [2]}}, {"event_type": "RWS", "arguments": [{"text": "recent work", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["recent", "work"], "offsets": [0, 1]}, {"text": "partial - 
input baselines", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["partial", "-", "input", "baselines"], "offsets": [10, 11, 12, 13]}, {"text": "annotation artifacts", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["annotation", "artifacts"], "offsets": [7, 8]}], "trigger": {"text": "removes", "tokens": ["removes"], "offsets": [6]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [72]}, {"text": "artificial datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["artificial", "datasets"], "offsets": [75, 76]}, {"text": "illustrate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["illustrate"], "offsets": [78]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [74]}}, {"event_type": "PUR", "arguments": [{"text": "trivial patterns", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["trivial", "patterns"], "offsets": [81, 82]}], "trigger": {"text": "illustrate", "tokens": ["illustrate"], "offsets": [78]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [101]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [102]}}], "document": ["recent", "work", "establishes", "dataset", "difficulty", "and", "removes", "annotation", "artifacts", "via", "partial", "-", "input", "baselines", "(", "e", ".", "g", ".", ",", "hypothesis", "-", "only", "model", "for", "snli", "or", "question", "-", "only", "model", "for", "vqa", ")", ".", "a", "successful", "partial", "-", "input", "baseline", "indicates", "that", "the", "dataset", "is", "cheatable", ".", "but", "the", "converse", "is", "not", "necessarily", "true", ":", "failures", "of", "partial", "-", "input", "baselines", "do", "not", "mean", "the", "dataset", "is", "free", "of", "artifacts", ".", "we", "first", "design", "artificial", "datasets", "to", "illustrate", 
"how", "the", "trivial", "patterns", "that", "are", "only", "visible", "in", "the", "full", "input", "can", "evade", "any", "partial", "-", "input", "baseline", ".", "next", ",", "we", "identify", "such", "artifacts", "in", "the", "snli", "dataset", "\u2014", "a", "hypothesis", "-", "only", "model", "augmented", "with", "trivial", "patterns", "in", "the", "premise", "can", "solve", "15", "%", "of", "previously", "-", "thought", "\u201c", "hard", "\u201d", "examples", ".", "our", "work", "provides", "a", "caveat", "for", "the", "use", "and", "creation", "of", "partial", "-", "input", "baselines", "for", "datasets", "."]}, {"venue": "ACL", "title": "Out of the Echo Chamber: Detecting Countering Debate Speeches", "abstract": "An educated and informed consumption of media content has become a challenge in modern times. With the shift from traditional news outlets to social media and similar venues, a major concern is that readers are becoming encapsulated in \u201cecho chambers\u201d and may fall prey to fake news and disinformation, lacking easy access to dissenting views. We suggest a novel task aiming to alleviate some of these concerns \u2013 that of detecting articles that most effectively counter the arguments \u2013 and not just the stance \u2013 made in a given text. We study this problem in the context of debate speeches. Given such a speech, we aim to identify, from among a set of speeches on the same topic and with an opposing stance, the ones that directly counter it. We provide a large dataset of 3,685 such speeches (in English), annotated for this relation, which hopefully would be of general interest to the NLP community. We explore several algorithms addressing this task, and while some are successful, all fall short of expert human performance, suggesting room for further research. 
All data collected during this work is freely available for research.", "doc_id": "cb8b9b62b8779fba5a6c60a5b5340fcf", "publication_year": 2020, "sentences": ["an educated and informed consumption of media content has become a challenge in modern times .", "with the shift from traditional news outlets to social media and similar venues , a major concern is that readers are becoming encapsulated in \u201c echo chambers \u201d and may fall prey to fake news and disinformation , lacking easy access to dissenting views .", "we suggest a novel task aiming to alleviate some of these concerns \u2013 that of detecting articles that most effectively counter the arguments \u2013 and not just the stance \u2013 made in a given text .", "we study this problem in the context of debate speeches .", "given such a speech , we aim to identify , from among a set of speeches on the same topic and with an opposing stance , the ones that directly counter it .", "we provide a large dataset of 3 , 685 such speeches ( in english ) , annotated for this relation , which hopefully would be of general interest to the nlp community .", "we explore several algorithms addressing this task , and while some are successful , all fall short of expert human performance , suggesting room for further research .", "all data collected during this work is freely available for research ."], "events": [{"event_type": "ITT", "arguments": [{"text": "educated and informed consumption of media content", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["educated", "and", "informed", "consumption", "of", "media", "content"], "offsets": [1, 2, 3, 4, 5, 6, 7]}], "trigger": {"text": "become", "tokens": ["become"], "offsets": [9]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [61]}, {"text": "task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["task"], "offsets": [65]}, {"text": "detecting", "nugget_type": 
"E-PUR", "argument_type": "Target", "tokens": ["detecting"], "offsets": [76]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "articles", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["articles"], "offsets": [77]}], "trigger": {"text": "detecting", "tokens": ["detecting"], "offsets": [76]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [97]}, {"text": "problem", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem"], "offsets": [100]}, {"text": "in the context of debate speeches", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "context", "of", "debate", "speeches"], "offsets": [101, 102, 103, 104, 105, 106]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [98]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [113]}, {"text": "ones that directly counter it", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["ones", "that", "directly", "counter", "speech"], "offsets": [135, 136, 137, 138, 111]}, {"text": "from among a set of speeches on the same topic and with an opposing stance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "among", "a", "set", "of", "speeches", "on", "the", "same", "topic", "and", "with", "an", "opposing", "stance"], "offsets": [118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [116]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [141]}, {"text": "dataset of 3 , 685 such speeches", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset", "of", "3", ",", "685", "such", "speeches"], "offsets": [145, 146, 147, 
148, 149, 150, 151]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [142]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [174]}, {"text": "algorithms", "nugget_type": "APP", "argument_type": "Content", "tokens": ["algorithms"], "offsets": [177]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [175]}}, {"event_type": "FAC", "arguments": [{"text": "algorithms", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["algorithms"], "offsets": [177]}], "trigger": {"text": "successful", "tokens": ["successful"], "offsets": [186]}}, {"event_type": "FAC", "arguments": [{"text": "algorithms", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["algorithms"], "offsets": [177]}, {"text": "expert human performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["expert", "human", "performance"], "offsets": [192, 193, 194]}], "trigger": {"text": "fall short", "tokens": ["fall", "short"], "offsets": [189, 190]}}, {"event_type": "FAC", "arguments": [{"text": "room for further research", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["room", "for", "further", "research"], "offsets": [197, 198, 199, 200]}], "trigger": {"text": "suggesting", "tokens": ["suggesting"], "offsets": [196]}}], "document": ["an", "educated", "and", "informed", "consumption", "of", "media", "content", "has", "become", "a", "challenge", "in", "modern", "times", ".", "with", "the", "shift", "from", "traditional", "news", "outlets", "to", "social", "media", "and", "similar", "venues", ",", "a", "major", "concern", "is", "that", "readers", "are", "becoming", "encapsulated", "in", "\u201c", "echo", "chambers", "\u201d", "and", "may", "fall", "prey", "to", "fake", "news", "and", "disinformation", ",", "lacking", "easy", "access", "to", "dissenting", "views", ".", "we", "suggest", "a", "novel", "task", "aiming", "to", "alleviate", "some", "of", 
"these", "concerns", "\u2013", "that", "of", "detecting", "articles", "that", "most", "effectively", "counter", "the", "arguments", "\u2013", "and", "not", "just", "the", "stance", "\u2013", "made", "in", "a", "given", "text", ".", "we", "study", "this", "problem", "in", "the", "context", "of", "debate", "speeches", ".", "given", "such", "a", "speech", ",", "we", "aim", "to", "identify", ",", "from", "among", "a", "set", "of", "speeches", "on", "the", "same", "topic", "and", "with", "an", "opposing", "stance", ",", "the", "ones", "that", "directly", "counter", "it", ".", "we", "provide", "a", "large", "dataset", "of", "3", ",", "685", "such", "speeches", "(", "in", "english", ")", ",", "annotated", "for", "this", "relation", ",", "which", "hopefully", "would", "be", "of", "general", "interest", "to", "the", "nlp", "community", ".", "we", "explore", "several", "algorithms", "addressing", "this", "task", ",", "and", "while", "some", "are", "successful", ",", "all", "fall", "short", "of", "expert", "human", "performance", ",", "suggesting", "room", "for", "further", "research", ".", "all", "data", "collected", "during", "this", "work", "is", "freely", "available", "for", "research", "."]}, {"venue": "ACL", "title": "Leveraging Wikipedia article evolution for promotional tone detection", "abstract": "Detecting biased language is useful for a variety of applications, such as identifying hyperpartisan news sources or flagging one-sided rhetoric. In this work we introduce WikiEvolve, a dataset for document-level promotional tone detection. Unlike previously proposed datasets, WikiEvolve contains seven versions of the same article from Wikipedia, from different points in its revision history; one with promotional tone, and six without it. This allows for obtaining more precise training signal for learning models from promotional tone detection. 
We adapt the previously proposed gradient reversal layer framework to encode two article versions simultaneously and thus leverage this additional training signal. In our experiments, our proposed adaptation of gradient reversal improves the accuracy of four different architectures on both in-domain and out-of-domain evaluation.", "doc_id": "29c48e029821abb78ad6f4922f974895", "publication_year": 2022, "sentences": ["detecting biased language is useful for a variety of applications , such as identifying hyperpartisan news sources or flagging one - sided rhetoric .", "in this work we introduce wikievolve , a dataset for document - level promotional tone detection .", "unlike previously proposed datasets , wikievolve contains seven versions of the same article from wikipedia , from different points in its revision history ; one with promotional tone , and six without it .", "this allows for obtaining more precise training signal for learning models from promotional tone detection .", "we adapt the previously proposed gradient reversal layer framework to encode two article versions simultaneously and thus leverage this additional training signal .", "in our experiments , our proposed adaptation of gradient reversal improves the accuracy of four different architectures on both in - domain and out - of - domain evaluation ."], "events": [{"event_type": "ITT", "arguments": [{"text": "biased language", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["biased", "language"], "offsets": [1, 2]}], "trigger": {"text": "useful", "tokens": ["useful"], "offsets": [4]}}, {"event_type": "MDS", "arguments": [{"text": "from promotional tone detection", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "promotional", "tone", "detection"], "offsets": [86, 87, 88, 89]}, {"text": "precise training signal", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["precise", "training", "signal"], "offsets": [80, 81, 82]}, {"text": "learning 
models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["learning", "models"], "offsets": [84, 85]}], "trigger": {"text": "obtaining", "tokens": ["obtaining"], "offsets": [78]}}, {"event_type": "MDS", "arguments": [{"text": "two article versions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["two", "article", "versions"], "offsets": [102, 103, 104]}, {"text": "leverage", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["leverage"], "offsets": [108]}], "trigger": {"text": "encode", "tokens": ["encode"], "offsets": [101]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [91]}, {"text": "previously proposed gradient reversal layer framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["previously", "proposed", "gradient", "reversal", "layer", "framework"], "offsets": [94, 95, 96, 97, 98, 99]}], "trigger": {"text": "adapt", "tokens": ["adapt"], "offsets": [92]}}, {"event_type": "FAC", "arguments": [{"text": "proposed adaptation of gradient reversal", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["proposed", "adaptation", "of", "gradient", "reversal"], "offsets": [119, 120, 121, 122, 123]}, {"text": "accuracy of four different architectures", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["accuracy", "of", "four", "different", "architectures"], "offsets": [126, 127, 128, 129, 130]}, {"text": "in - domain evaluation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["in", "-", "domain", "evaluation"], "offsets": [133, 134, 135, 142]}, {"text": "out - of - domain evaluation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["out", "-", "of", "-", "domain", "evaluation"], "offsets": [137, 138, 139, 140, 141, 142]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [124]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", 
"argument_type": "Researcher", "tokens": ["we"], "offsets": [27]}, {"text": "wikievolve", "nugget_type": "DST", "argument_type": "Content", "tokens": ["wikievolve"], "offsets": [29]}, {"text": "document - level promotional tone detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["document", "-", "level", "promotional", "tone", "detection"], "offsets": [34, 35, 36, 37, 38, 39]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [28]}}, {"event_type": "PUR", "arguments": [{"text": "additional training signal", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["additional", "training", "signal"], "offsets": [110, 111, 112]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [108]}}], "document": ["detecting", "biased", "language", "is", "useful", "for", "a", "variety", "of", "applications", ",", "such", "as", "identifying", "hyperpartisan", "news", "sources", "or", "flagging", "one", "-", "sided", "rhetoric", ".", "in", "this", "work", "we", "introduce", "wikievolve", ",", "a", "dataset", "for", "document", "-", "level", "promotional", "tone", "detection", ".", "unlike", "previously", "proposed", "datasets", ",", "wikievolve", "contains", "seven", "versions", "of", "the", "same", "article", "from", "wikipedia", ",", "from", "different", "points", "in", "its", "revision", "history", ";", "one", "with", "promotional", "tone", ",", "and", "six", "without", "it", ".", "this", "allows", "for", "obtaining", "more", "precise", "training", "signal", "for", "learning", "models", "from", "promotional", "tone", "detection", ".", "we", "adapt", "the", "previously", "proposed", "gradient", "reversal", "layer", "framework", "to", "encode", "two", "article", "versions", "simultaneously", "and", "thus", "leverage", "this", "additional", "training", "signal", ".", "in", "our", "experiments", ",", "our", "proposed", "adaptation", "of", "gradient", "reversal", "improves", "the", "accuracy", "of", "four", "different", 
"architectures", "on", "both", "in", "-", "domain", "and", "out", "-", "of", "-", "domain", "evaluation", "."]}, {"venue": "ACL", "title": "Generating Question-Answer Hierarchies", "abstract": "The process of knowledge acquisition can be viewed as a question-answer game between a student and a teacher in which the student typically starts by asking broad, open-ended questions before drilling down into specifics (Hintikka, 1981; Hakkarainen and Sintonen, 2002). This pedagogical perspective motivates a new way of representing documents. In this paper, we present SQUASH (Specificity-controlled Question-Answer Hierarchies), a novel and challenging text generation task that converts an input document into a hierarchy of question-answer pairs. Users can click on high-level questions (e.g., \u201cWhy did Frodo leave the Fellowship?\u201d) to reveal related but more specific questions (e.g., \u201cWho did Frodo leave with?\u201d). Using a question taxonomy loosely based on Lehnert (1978), we classify questions in existing reading comprehension datasets as either GENERAL or SPECIFIC . We then use these labels as input to a pipelined system centered around a conditional neural language model. 
We extensively evaluate the quality of the generated QA hierarchies through crowdsourced experiments and report strong empirical results.", "doc_id": "c9d0ecfe3f2ce576a32bc03e3e46aefa", "publication_year": 2019, "sentences": ["the process of knowledge acquisition can be viewed as a question - answer game between a student and a teacher in which the student typically starts by asking broad , open - ended questions before drilling down into specifics ( hintikka , 1981 ; hakkarainen and sintonen , 2002 ) .", "this pedagogical perspective motivates a new way of representing documents .", "in this paper , we present squash ( specificity - controlled question - answer hierarchies ) , a novel and challenging text generation task that converts an input document into a hierarchy of question - answer pairs .", "users can click on high - level questions ( e . g . , \u201c why did frodo leave the fellowship ? \u201d ) to reveal related but more specific questions ( e . g . , \u201c who did frodo leave with ? 
\u201d ) .", "using a question taxonomy loosely based on lehnert ( 1978 ) , we classify questions in existing reading comprehension datasets as either general or specific .", "we then use these labels as input to a pipelined system centered around a conditional neural language model .", "we extensively evaluate the quality of the generated qa hierarchies through crowdsourced experiments and report strong empirical results ."], "events": [{"event_type": "ITT", "arguments": [{"text": "question - answer game", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["question", "-", "answer", "game"], "offsets": [10, 11, 12, 13]}], "trigger": {"text": "viewed", "tokens": ["viewed"], "offsets": [7]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [66]}, {"text": "squash", "nugget_type": "APP", "argument_type": "Content", "tokens": ["squash"], "offsets": [68]}, {"text": "converts", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["converts"], "offsets": [87]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [67]}}, {"event_type": "PUR", "arguments": [{"text": "input document into a hierarchy of question - answer pairs", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["input", "document", "into", "a", "hierarchy", "of", "question", "-", "answer", "pairs"], "offsets": [89, 90, 91, 92, 93, 94, 95, 96, 97, 98]}], "trigger": {"text": "converts", "tokens": ["converts"], "offsets": [87]}}, {"event_type": "MDS", "arguments": [{"text": "high - level questions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["high", "-", "level", "questions"], "offsets": [104, 105, 106, 107]}, {"text": "related but more specific questions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["related", "but", "more", "specific", "questions"], "offsets": [126, 127, 128, 129, 130]}], "trigger": {"text": "reveal", "tokens": ["reveal"], 
"offsets": [125]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [159]}, {"text": "questions in existing reading comprehension datasets", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["questions", "in", "existing", "reading", "comprehension", "datasets"], "offsets": [161, 162, 163, 164, 165, 166]}, {"text": "as either general or specific", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "either", "general", "or", "specific"], "offsets": [167, 168, 169, 170, 171]}], "trigger": {"text": "classify", "tokens": ["classify"], "offsets": [160]}}, {"event_type": "MDS", "arguments": [{"text": "conditional neural language model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["conditional", "neural", "language", "model"], "offsets": [187, 188, 189, 190]}, {"text": "input to a pipelined system", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["input", "to", "a", "pipelined", "system"], "offsets": [179, 180, 181, 182, 183]}, {"text": "labels", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["labels"], "offsets": [177]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [175]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [192]}, {"text": "quality of the generated qa hierarchies", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["quality", "of", "the", "generated", "qa", "hierarchies"], "offsets": [196, 197, 198, 199, 200, 201]}, {"text": "through crowdsourced experiments", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "crowdsourced", "experiments"], "offsets": [202, 203, 204]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [194]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": 
["we"], "offsets": [192]}, {"text": "strong empirical results", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["strong", "empirical", "results"], "offsets": [207, 208, 209]}], "trigger": {"text": "report", "tokens": ["report"], "offsets": [206]}}], "document": ["the", "process", "of", "knowledge", "acquisition", "can", "be", "viewed", "as", "a", "question", "-", "answer", "game", "between", "a", "student", "and", "a", "teacher", "in", "which", "the", "student", "typically", "starts", "by", "asking", "broad", ",", "open", "-", "ended", "questions", "before", "drilling", "down", "into", "specifics", "(", "hintikka", ",", "1981", ";", "hakkarainen", "and", "sintonen", ",", "2002", ")", ".", "this", "pedagogical", "perspective", "motivates", "a", "new", "way", "of", "representing", "documents", ".", "in", "this", "paper", ",", "we", "present", "squash", "(", "specificity", "-", "controlled", "question", "-", "answer", "hierarchies", ")", ",", "a", "novel", "and", "challenging", "text", "generation", "task", "that", "converts", "an", "input", "document", "into", "a", "hierarchy", "of", "question", "-", "answer", "pairs", ".", "users", "can", "click", "on", "high", "-", "level", "questions", "(", "e", ".", "g", ".", ",", "\u201c", "why", "did", "frodo", "leave", "the", "fellowship", "?", "\u201d", ")", "to", "reveal", "related", "but", "more", "specific", "questions", "(", "e", ".", "g", ".", ",", "\u201c", "who", "did", "frodo", "leave", "with", "?", "\u201d", ")", ".", "using", "a", "question", "taxonomy", "loosely", "based", "on", "lehnert", "(", "1978", ")", ",", "we", "classify", "questions", "in", "existing", "reading", "comprehension", "datasets", "as", "either", "general", "or", "specific", ".", "we", "then", "use", "these", "labels", "as", "input", "to", "a", "pipelined", "system", "centered", "around", "a", "conditional", "neural", "language", "model", ".", "we", "extensively", "evaluate", "the", "quality", "of", "the", "generated", "qa", 
"hierarchies", "through", "crowdsourced", "experiments", "and", "report", "strong", "empirical", "results", "."]}, {"venue": "ACL", "title": "Evaluating Discourse in Structured Text Representations", "abstract": "Discourse structure is integral to understanding a text and is helpful in many NLP tasks. Learning latent representations of discourse is an attractive alternative to acquiring expensive labeled discourse data. Liu and Lapata (2018) propose a structured attention mechanism for text classification that derives a tree over a text, akin to an RST discourse tree. We examine this model in detail, and evaluate on additional discourse-relevant tasks and datasets, in order to assess whether the structured attention improves performance on the end task and whether it captures a text\u2019s discourse structure. We find the learned latent trees have little to no structure and instead focus on lexical cues; even after obtaining more structured trees with proposed model modifications, the trees are still far from capturing discourse structure when compared to discourse dependency trees from an existing discourse parser. 
Finally, ablation studies show the structured attention provides little benefit, sometimes even hurting performance.", "doc_id": "c608fb220b27bf96d8d2c3480a22730d", "publication_year": 2019, "sentences": ["discourse structure is integral to understanding a text and is helpful in many nlp tasks .", "learning latent representations of discourse is an attractive alternative to acquiring expensive labeled discourse data .", "liu and lapata ( 2018 ) propose a structured attention mechanism for text classification that derives a tree over a text , akin to an rst discourse tree .", "we examine this model in detail , and evaluate on additional discourse - relevant tasks and datasets , in order to assess whether the structured attention improves performance on the end task and whether it captures a text \u2019 s discourse structure .", "we find the learned latent trees have little to no structure and instead focus on lexical cues ; even after obtaining more structured trees with proposed model modifications , the trees are still far from capturing discourse structure when compared to discourse dependency trees from an existing discourse parser .", "finally , ablation studies show the structured attention provides little benefit , sometimes even hurting performance ."], "events": [{"event_type": "ITT", "arguments": [{"text": "discourse structure", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["discourse", "structure"], "offsets": [0, 1]}], "trigger": {"text": "integral", "tokens": ["integral"], "offsets": [3]}}, {"event_type": "RWS", "arguments": [{"text": "latent representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["latent", "representations"], "offsets": [17, 18]}, {"text": "labeled discourse data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["labeled", "discourse", "data"], "offsets": [28, 29, 30]}], "trigger": {"text": "acquiring", "tokens": ["acquiring"], "offsets": [26]}}, {"event_type": "RWS", 
"arguments": [{"text": "text classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "classification"], "offsets": [44, 45]}, {"text": "tree", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["tree"], "offsets": [49]}, {"text": "structured attention mechanism", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["structured", "attention", "mechanism"], "offsets": [40, 41, 42]}], "trigger": {"text": "derives", "tokens": ["derives"], "offsets": [47]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [61]}, {"text": "model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model"], "offsets": [64]}, {"text": "in detail", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "detail"], "offsets": [65, 66]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": [62]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [61]}, {"text": "assess", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["assess"], "offsets": [82]}, {"text": "additional discourse - relevant tasks", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["additional", "discourse", "-", "relevant", "tasks"], "offsets": [71, 72, 73, 74, 75]}, {"text": "additional discourse - relevant datasets", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["additional", "discourse", "-", "relevant", "datasets"], "offsets": [71, 72, 73, 74, 77]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [69]}}, {"event_type": "PUR", "arguments": [{"text": "whether the structured attention improves performance on the end task", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["whether", "the", "structured", "attention", "improves", "performance", "on", "the", "end", "task"], "offsets": [83, 84, 85, 86, 87, 
88, 89, 90, 91, 92]}, {"text": "whether it captures a text \u2019 s discourse structure", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["whether", "it", "captures", "a", "text", "\u2019", "s", "discourse", "structure"], "offsets": [94, 95, 96, 97, 98, 99, 100, 101, 102]}], "trigger": {"text": "assess", "tokens": ["assess"], "offsets": [82]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [104]}, {"text": "have little to no structure", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["have", "little", "to", "no", "structure"], "offsets": [110, 111, 112, 113, 114]}, {"text": "focus", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["focus"], "offsets": [117]}, {"text": "far from capturing", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["far", "from", "capturing"], "offsets": [137, 138, 139]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [105]}}, {"event_type": "FAC", "arguments": [{"text": "learned latent trees", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["learned", "latent", "trees"], "offsets": [107, 108, 109]}], "trigger": {"text": "have little to no structure", "tokens": ["have", "little", "to", "no", "structure"], "offsets": [110, 111, 112, 113, 114]}}, {"event_type": "FAC", "arguments": [{"text": "learned latent trees", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["learned", "latent", "trees"], "offsets": [107, 108, 109]}, {"text": "lexical cues", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["lexical", "cues"], "offsets": [119, 120]}], "trigger": {"text": "focus", "tokens": ["focus"], "offsets": [117]}}, {"event_type": "CMP", "arguments": [{"text": "after obtaining more structured trees with proposed model modifications", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["after", "obtaining", "more", "structured", "trees", "with", "proposed", 
"model", "modifications"], "offsets": [123, 124, 125, 126, 127, 128, 129, 130, 131]}, {"text": "trees", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["trees"], "offsets": [134]}, {"text": "discourse dependency trees", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["discourse", "dependency", "trees"], "offsets": [145, 146, 147]}, {"text": "discourse structure", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["discourse", "structure"], "offsets": [140, 141]}], "trigger": {"text": "far from capturing", "tokens": ["far", "from", "capturing"], "offsets": [137, 138, 139]}}, {"event_type": "FAC", "arguments": [{"text": "structured attention", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["structured", "attention"], "offsets": [160, 161]}, {"text": "little benefit", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["little", "benefit"], "offsets": [163, 164]}, {"text": "hurting performance", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["hurting", "performance"], "offsets": [168, 169]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [162]}}], "document": ["discourse", "structure", "is", "integral", "to", "understanding", "a", "text", "and", "is", "helpful", "in", "many", "nlp", "tasks", ".", "learning", "latent", "representations", "of", "discourse", "is", "an", "attractive", "alternative", "to", "acquiring", "expensive", "labeled", "discourse", "data", ".", "liu", "and", "lapata", "(", "2018", ")", "propose", "a", "structured", "attention", "mechanism", "for", "text", "classification", "that", "derives", "a", "tree", "over", "a", "text", ",", "akin", "to", "an", "rst", "discourse", "tree", ".", "we", "examine", "this", "model", "in", "detail", ",", "and", "evaluate", "on", "additional", "discourse", "-", "relevant", "tasks", "and", "datasets", ",", "in", "order", "to", "assess", "whether", "the", "structured", "attention", "improves", "performance", "on", "the", "end", "task", 
"and", "whether", "it", "captures", "a", "text", "\u2019", "s", "discourse", "structure", ".", "we", "find", "the", "learned", "latent", "trees", "have", "little", "to", "no", "structure", "and", "instead", "focus", "on", "lexical", "cues", ";", "even", "after", "obtaining", "more", "structured", "trees", "with", "proposed", "model", "modifications", ",", "the", "trees", "are", "still", "far", "from", "capturing", "discourse", "structure", "when", "compared", "to", "discourse", "dependency", "trees", "from", "an", "existing", "discourse", "parser", ".", "finally", ",", "ablation", "studies", "show", "the", "structured", "attention", "provides", "little", "benefit", ",", "sometimes", "even", "hurting", "performance", "."]}, {"venue": "ACL", "title": "Learning From Failure: Data Capture in an Australian Aboriginal Community", "abstract": "Most low resource language technology development is premised on the need to collect data for training statistical models. When we follow the typical process of recording and transcribing text for small Indigenous languages, we hit up against the so-called \u201ctranscription bottleneck.\u201d Therefore it is worth exploring new ways of engaging with speakers which generate data while avoiding the transcription bottleneck. We have deployed a prototype app for speakers to use for confirming system guesses in an approach to transcription based on word spotting. However, in the process of testing the app we encountered many new problems for engagement with speakers. This paper presents a close-up study of the process of deploying data capture technology on the ground in an Australian Aboriginal community. 
We reflect on our interactions with participants and draw lessons that apply to anyone seeking to develop methods for language data collection in an Indigenous community.", "doc_id": "1645a975f25c6129c4de1908bb79bc1e", "publication_year": 2022, "sentences": ["most low resource language technology development is premised on the need to collect data for training statistical models .", "when we follow the typical process of recording and transcribing text for small indigenous languages , we hit up against the so - called \u201c transcription bottleneck . \u201d", "therefore it is worth exploring new ways of engaging with speakers which generate data while avoiding the transcription bottleneck .", "we have deployed a prototype app for speakers to use for confirming system guesses in an approach to transcription based on word spotting .", "however , in the process of testing the app we encountered many new problems for engagement with speakers .", "this paper presents a close - up study of the process of deploying data capture technology on the ground in an australian aboriginal community .", "we reflect on our interactions with participants and draw lessons that apply to anyone seeking to develop methods for language data collection in an indigenous community ."], "events": [{"event_type": "ITT", "arguments": [{"text": "low resource language technology development", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["low", "resource", "language", "technology", "development"], "offsets": [1, 2, 3, 4, 5]}], "trigger": {"text": "premised", "tokens": ["premised"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "typical process", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["typical", "process"], "offsets": [23, 24]}, {"text": "transcription bottleneck", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["transcription", "bottleneck"], "offsets": [44, 45]}, {"text": "small indigenous languages", "nugget_type": "TAK", 
"argument_type": "Target", "tokens": ["small", "indigenous", "languages"], "offsets": [31, 32, 33]}], "trigger": {"text": "hit", "tokens": ["hit"], "offsets": [36]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [68]}, {"text": "prototype app", "nugget_type": "APP", "argument_type": "Content", "tokens": ["prototype", "app"], "offsets": [72, 73]}, {"text": "confirming", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["confirming"], "offsets": [79]}], "trigger": {"text": "deployed", "tokens": ["deployed"], "offsets": [70]}}, {"event_type": "PUR", "arguments": [{"text": "system guesses", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["system", "guesses"], "offsets": [80, 81]}, {"text": "in an approach to transcription based on word spotting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "approach", "to", "transcription", "based", "on", "word", "spotting"], "offsets": [82, 83, 84, 85, 86, 87, 88, 89, 90]}], "trigger": {"text": "confirming", "tokens": ["confirming"], "offsets": [79]}}, {"event_type": "WKS", "arguments": [{"text": "close - up study of the process of deploying data", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["close", "-", "up", "study", "of", "the", "process", "of", "deploying", "data"], "offsets": [115, 116, 117, 118, 119, 120, 121, 122, 123, 124]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [113]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [136]}, {"text": "interactions", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["interactions"], "offsets": [140]}, {"text": "with participants", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "participants"], "offsets": [141, 142]}], "trigger": {"text": "reflect", "tokens": ["reflect"], "offsets": [137]}}, 
{"event_type": "WKS", "arguments": [{"text": "lessons", "nugget_type": "APP", "argument_type": "Content", "tokens": ["lessons"], "offsets": [145]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [136]}, {"text": "develop", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["develop"], "offsets": [152]}, {"text": "language data collection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "data", "collection"], "offsets": [155, 156, 157]}, {"text": "in an indigenous community", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "indigenous", "community"], "offsets": [158, 159, 160, 161]}], "trigger": {"text": "draw", "tokens": ["draw"], "offsets": [144]}}, {"event_type": "PUR", "arguments": [{"text": "methods", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["methods"], "offsets": [153]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [152]}}], "document": ["most", "low", "resource", "language", "technology", "development", "is", "premised", "on", "the", "need", "to", "collect", "data", "for", "training", "statistical", "models", ".", "when", "we", "follow", "the", "typical", "process", "of", "recording", "and", "transcribing", "text", "for", "small", "indigenous", "languages", ",", "we", "hit", "up", "against", "the", "so", "-", "called", "\u201c", "transcription", "bottleneck", ".", "\u201d", "therefore", "it", "is", "worth", "exploring", "new", "ways", "of", "engaging", "with", "speakers", "which", "generate", "data", "while", "avoiding", "the", "transcription", "bottleneck", ".", "we", "have", "deployed", "a", "prototype", "app", "for", "speakers", "to", "use", "for", "confirming", "system", "guesses", "in", "an", "approach", "to", "transcription", "based", "on", "word", "spotting", ".", "however", ",", "in", "the", "process", "of", "testing", "the", "app", "we", "encountered", "many", "new", "problems", "for", "engagement", 
"with", "speakers", ".", "this", "paper", "presents", "a", "close", "-", "up", "study", "of", "the", "process", "of", "deploying", "data", "capture", "technology", "on", "the", "ground", "in", "an", "australian", "aboriginal", "community", ".", "we", "reflect", "on", "our", "interactions", "with", "participants", "and", "draw", "lessons", "that", "apply", "to", "anyone", "seeking", "to", "develop", "methods", "for", "language", "data", "collection", "in", "an", "indigenous", "community", "."]}, {"venue": "ACL", "title": "Generating SOAP Notes from Doctor-Patient Conversations Using Modular Summarization Techniques", "abstract": "Following each patient visit, physicians draft long semi-structured clinical summaries called SOAP notes. While invaluable to clinicians and researchers, creating digital SOAP notes is burdensome, contributing to physician burnout. In this paper, we introduce the first complete pipelines to leverage deep summarization models to generate these notes based on transcripts of conversations between physicians and patients. After exploring a spectrum of methods across the extractive-abstractive spectrum, we propose Cluster2Sent, an algorithm that (i) extracts important utterances relevant to each summary section; (ii) clusters together related utterances; and then (iii) generates one summary sentence per cluster. Cluster2Sent outperforms its purely abstractive counterpart by 8 ROUGE-1 points, and produces significantly more factual and coherent sentences as assessed by expert human evaluators. For reproducibility, we demonstrate similar benefits on the publicly available AMI dataset. 
Our results speak to the benefits of structuring summaries into sections and annotating supporting evidence when constructing summarization corpora.", "doc_id": "0277a79957b7867e9e2c7b1e08c9843f", "publication_year": 2021, "sentences": ["following each patient visit , physicians draft long semi - structured clinical summaries called soap notes .", "while invaluable to clinicians and researchers , creating digital soap notes is burdensome , contributing to physician burnout .", "in this paper , we introduce the first complete pipelines to leverage deep summarization models to generate these notes based on transcripts of conversations between physicians and patients .", "after exploring a spectrum of methods across the extractive - abstractive spectrum , we propose cluster2sent , an algorithm that ( i ) extracts important utterances relevant to each summary section ; ( ii ) clusters together related utterances ; and then ( iii ) generates one summary sentence per cluster .", "cluster2sent outperforms its purely abstractive counterpart by 8 rouge - 1 points , and produces significantly more factual and coherent sentences as assessed by expert human evaluators .", "for reproducibility , we demonstrate similar benefits on the publicly available ami dataset .", "our results speak to the benefits of structuring summaries into sections and annotating supporting evidence when constructing summarization corpora ."], "events": [{"event_type": "ITT", "arguments": [{"text": "clinical summaries", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["clinical", "summaries"], "offsets": [11, 12]}], "trigger": {"text": "draft", "tokens": ["draft"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "burdensome", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["burdensome"], "offsets": [29]}], "trigger": {"text": "burdensome", "tokens": ["burdensome"], "offsets": [29]}}, {"event_type": "RWF", "arguments": [{"text": "physician burnout", "nugget_type": 
"WEA", "argument_type": "Fault", "tokens": ["physician", "burnout"], "offsets": [33, 34]}, {"text": "digital soap notes", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["digital", "soap", "notes"], "offsets": [25, 26, 27]}], "trigger": {"text": "contributing to", "tokens": ["contributing", "to"], "offsets": [31, 32]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [40]}, {"text": "complete pipelines", "nugget_type": "APP", "argument_type": "Content", "tokens": ["complete", "pipelines"], "offsets": [44, 45]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [41]}}, {"event_type": "MDS", "arguments": [{"text": "deep summarization models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["deep", "summarization", "models"], "offsets": [48, 49, 50]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [52]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [47]}}, {"event_type": "PUR", "arguments": [{"text": "notes based on transcripts of conversations between physicians and patients", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["notes", "based", "on", "transcripts", "of", "conversations", "between", "physicians", "and", "patients"], "offsets": [54, 55, 56, 57, 58, 59, 60, 61, 62, 63]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [52]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [78]}, {"text": "cluster2sent", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["cluster2sent"], "offsets": [80]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [79]}}, {"event_type": "MDS", "arguments": [{"text": "important utterances relevant to each summary section", "nugget_type": "FEA", "argument_type": "BaseComponent", 
"tokens": ["important", "utterances", "relevant", "to", "each", "summary", "section"], "offsets": [89, 90, 91, 92, 93, 94, 95]}], "trigger": {"text": "extracts", "tokens": ["extracts"], "offsets": [88]}}, {"event_type": "MDS", "arguments": [{"text": "related utterances", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["related", "utterances"], "offsets": [102, 103]}], "trigger": {"text": "clusters together", "tokens": ["clusters", "together"], "offsets": [100, 101]}}, {"event_type": "MDS", "arguments": [{"text": "one summary sentence", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["one", "summary", "sentence"], "offsets": [111, 112, 113]}, {"text": "per cluster", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["per", "cluster"], "offsets": [114, 115]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [110]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [118]}, {"text": "cluster2sent", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["cluster2sent"], "offsets": [117]}, {"text": "purely abstractive counterpart", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["purely", "abstractive", "counterpart"], "offsets": [120, 121, 122]}, {"text": "8", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["8"], "offsets": [124]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [118]}}, {"event_type": "CMP", "arguments": [{"text": "cluster2sent", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["cluster2sent"], "offsets": [117]}, {"text": "purely abstractive counterpart", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["purely", "abstractive", "counterpart"], "offsets": [120, 121, 122]}, {"text": "as assessed by expert human evaluators", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "assessed", "by", 
"expert", "human", "evaluators"], "offsets": [138, 139, 140, 141, 142, 143]}, {"text": "significantly more", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly", "more"], "offsets": [132, 133]}, {"text": "factual and coherent sentences", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["factual", "and", "coherent", "sentences"], "offsets": [134, 135, 136, 137]}], "trigger": {"text": "produces", "tokens": ["produces"], "offsets": [131]}}, {"event_type": "WKS", "arguments": [{"text": "reproducibility", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["reproducibility"], "offsets": [146]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [148]}, {"text": "publicly available ami dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["publicly", "available", "ami", "dataset"], "offsets": [154, 155, 156, 157]}, {"text": "similar benefits", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["similar", "benefits"], "offsets": [150, 151]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [149]}}, {"event_type": "FAC", "arguments": [{"text": "when constructing summarization corpora", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "constructing", "summarization", "corpora"], "offsets": [174, 175, 176, 177]}, {"text": "benefits of structuring summaries into sections and annotating supporting evidence", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["benefits", "of", "structuring", "summaries", "into", "sections", "and", "annotating", "supporting", "evidence"], "offsets": [164, 165, 166, 167, 168, 169, 170, 171, 172, 173]}], "trigger": {"text": "speak", "tokens": ["speak"], "offsets": [161]}}], "document": ["following", "each", "patient", "visit", ",", "physicians", "draft", "long", "semi", "-", "structured", "clinical", "summaries", "called", "soap", "notes", ".", "while", "invaluable", "to", 
"clinicians", "and", "researchers", ",", "creating", "digital", "soap", "notes", "is", "burdensome", ",", "contributing", "to", "physician", "burnout", ".", "in", "this", "paper", ",", "we", "introduce", "the", "first", "complete", "pipelines", "to", "leverage", "deep", "summarization", "models", "to", "generate", "these", "notes", "based", "on", "transcripts", "of", "conversations", "between", "physicians", "and", "patients", ".", "after", "exploring", "a", "spectrum", "of", "methods", "across", "the", "extractive", "-", "abstractive", "spectrum", ",", "we", "propose", "cluster2sent", ",", "an", "algorithm", "that", "(", "i", ")", "extracts", "important", "utterances", "relevant", "to", "each", "summary", "section", ";", "(", "ii", ")", "clusters", "together", "related", "utterances", ";", "and", "then", "(", "iii", ")", "generates", "one", "summary", "sentence", "per", "cluster", ".", "cluster2sent", "outperforms", "its", "purely", "abstractive", "counterpart", "by", "8", "rouge", "-", "1", "points", ",", "and", "produces", "significantly", "more", "factual", "and", "coherent", "sentences", "as", "assessed", "by", "expert", "human", "evaluators", ".", "for", "reproducibility", ",", "we", "demonstrate", "similar", "benefits", "on", "the", "publicly", "available", "ami", "dataset", ".", "our", "results", "speak", "to", "the", "benefits", "of", "structuring", "summaries", "into", "sections", "and", "annotating", "supporting", "evidence", "when", "constructing", "summarization", "corpora", "."]}, {"venue": "ACL", "title": "Training Data is More Valuable than You Think: A Simple and Effective Method by Retrieving from Training Data", "abstract": "Retrieval-based methods have been shown to be effective in NLP tasks via introducing external knowledge. However, the indexing and retrieving of large-scale corpora bring considerable computational cost. 
Surprisingly, we found that REtrieving from the traINing datA (REINA) only can lead to significant gains on multiple NLG and NLU tasks. We retrieve the labeled training instances most similar to the input text and then concatenate them with the input to feed into the model to generate the output. Experimental results show that this simple method can achieve significantly better performance on a variety of NLU and NLG tasks, including summarization, machine translation, language modeling, and question answering tasks. For instance, our proposed method achieved state-of-the-art results on XSum, BigPatent, and CommonsenseQA. Our code is released, https://github.com/microsoft/REINA .", "doc_id": "dc398ab3fd3cb3b22742d4dc7a5cd1c1", "publication_year": 2022, "sentences": ["retrieval - based methods have been shown to be effective in nlp tasks via introducing external knowledge .", "however , the indexing and retrieving of large - scale corpora bring considerable computational cost .", "surprisingly , we found that retrieving from the training data ( reina ) only can lead to significant gains on multiple nlg and nlu tasks .", "we retrieve the labeled training instances most similar to the input text and then concatenate them with the input to feed into the model to generate the output .", "experimental results show that this simple method can achieve significantly better performance on a variety of nlu and nlg tasks , including summarization , machine translation , language modeling , and question answering tasks .", "for instance , our proposed method achieved state - of - the - art results on xsum , bigpatent , and commonsenseqa .", "our code is released , https : / / github . 
com / microsoft / reina ."], "events": [{"event_type": "ITT", "arguments": [{"text": "nlp tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "tasks"], "offsets": [11, 12]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [9]}}, {"event_type": "RWF", "arguments": [{"text": "considerable computational cost", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["considerable", "computational", "cost"], "offsets": [30, 31, 32]}, {"text": "indexing of large - scale corpora", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["indexing", "of", "large", "-", "scale", "corpora"], "offsets": [21, 24, 25, 26, 27, 28]}, {"text": "retrieving of large - scale corpora", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["retrieving", "of", "large", "-", "scale", "corpora"], "offsets": [23, 24, 25, 26, 27, 28]}], "trigger": {"text": "bring", "tokens": ["bring"], "offsets": [29]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [60]}, {"text": "most similar to the input text", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["most", "similar", "to", "the", "input", "text"], "offsets": [66, 67, 68, 69, 70, 71]}, {"text": "labeled training instances", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["labeled", "training", "instances"], "offsets": [63, 64, 65]}], "trigger": {"text": "retrieve", "tokens": ["retrieve"], "offsets": [61]}}, {"event_type": "MDS", "arguments": [{"text": "labeled training instances", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["labeled", "training", "instances"], "offsets": [63, 64, 65]}, {"text": "input", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["input"], "offsets": [78]}], "trigger": {"text": "concatenate", "tokens": ["concatenate"], "offsets": [74]}}, {"event_type": "PUR", "arguments": [{"text": "output", "nugget_type": 
"FEA", "argument_type": "Aim", "tokens": ["output"], "offsets": [87]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [85]}}, {"event_type": "FIN", "arguments": [{"text": "achieve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieve"], "offsets": [97]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [91]}}, {"event_type": "FAC", "arguments": [{"text": "method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["method"], "offsets": [129]}, {"text": "state - of - the - art results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [131, 132, 133, 134, 135, 136, 137, 138]}, {"text": "on xsum , bigpatent , and commonsenseqa", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "xsum", ",", "bigpatent", ",", "and", "commonsenseqa"], "offsets": [139, 140, 141, 142, 143, 144, 145]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [130]}}, {"event_type": "RWF", "arguments": [{"text": "retrieving from the training data", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["retrieving", "from", "the", "training", "data"], "offsets": [39, 40, 41, 42, 43]}, {"text": "only can lead", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["only", "can", "lead"], "offsets": [47, 48, 49]}, {"text": "significant gains", "nugget_type": "STR", "argument_type": "Target", "tokens": ["significant", "gains"], "offsets": [51, 52]}], "trigger": {"text": "only can lead", "tokens": ["only", "can", "lead"], "offsets": [47, 48, 49]}}, {"event_type": "MDS", "arguments": [{"text": "into the model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["into", "the", "model"], "offsets": [81, 82, 83]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [85]}, {"text": "input", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": 
["input"], "offsets": [78]}, {"text": "labeled training instances", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["labeled", "training", "instances"], "offsets": [63, 64, 65]}], "trigger": {"text": "feed", "tokens": ["feed"], "offsets": [80]}}, {"event_type": "FAC", "arguments": [{"text": "this simple method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["this", "simple", "method"], "offsets": [93, 94, 95]}, {"text": "significantly better performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["significantly", "better", "performance"], "offsets": [98, 99, 100]}, {"text": "summarization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["summarization"], "offsets": [111]}, {"text": "machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["machine", "translation"], "offsets": [113, 114]}, {"text": "language modeling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "modeling"], "offsets": [116, 117]}, {"text": "question answering tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["question", "answering", "tasks"], "offsets": [120, 121, 122]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [97]}}], "document": ["retrieval", "-", "based", "methods", "have", "been", "shown", "to", "be", "effective", "in", "nlp", "tasks", "via", "introducing", "external", "knowledge", ".", "however", ",", "the", "indexing", "and", "retrieving", "of", "large", "-", "scale", "corpora", "bring", "considerable", "computational", "cost", ".", "surprisingly", ",", "we", "found", "that", "retrieving", "from", "the", "training", "data", "(", "reina", ")", "only", "can", "lead", "to", "significant", "gains", "on", "multiple", "nlg", "and", "nlu", "tasks", ".", "we", "retrieve", "the", "labeled", "training", "instances", "most", "similar", "to", "the", "input", "text", "and", "then", "concatenate", "them", "with", "the", "input", "to", "feed", 
"into", "the", "model", "to", "generate", "the", "output", ".", "experimental", "results", "show", "that", "this", "simple", "method", "can", "achieve", "significantly", "better", "performance", "on", "a", "variety", "of", "nlu", "and", "nlg", "tasks", ",", "including", "summarization", ",", "machine", "translation", ",", "language", "modeling", ",", "and", "question", "answering", "tasks", ".", "for", "instance", ",", "our", "proposed", "method", "achieved", "state", "-", "of", "-", "the", "-", "art", "results", "on", "xsum", ",", "bigpatent", ",", "and", "commonsenseqa", ".", "our", "code", "is", "released", ",", "https", ":", "/", "/", "github", ".", "com", "/", "microsoft", "/", "reina", "."]}, {"venue": "ACL", "title": "Speaker Sensitive Response Evaluation Model", "abstract": "Automatic evaluation of open-domain dialogue response generation is very challenging because there are many appropriate responses for a given context. Existing evaluation models merely compare the generated response with the ground truth response and rate many of the appropriate responses as inappropriate if they deviate from the ground truth. One approach to resolve this problem is to consider the similarity of the generated response with the conversational context. In this paper, we propose an automatic evaluation model based on that idea and learn the model parameters from an unlabeled conversation corpus. Our approach considers the speakers in defining the different levels of similar context. We use a Twitter conversation corpus that contains many speakers and conversations to test our evaluation model. Experiments show that our model outperforms the other existing evaluation metrics in terms of high correlation with human annotation scores. We also show that our model trained on Twitter can be applied to movie dialogues without any additional training. 
We provide our code and the learned parameters so that they can be used for automatic evaluation of dialogue response generation models.", "doc_id": "9f0c70217e24a5146fc256ae31695fa3", "publication_year": 2020, "sentences": ["automatic evaluation of open - domain dialogue response generation is very challenging because there are many appropriate responses for a given context .", "existing evaluation models merely compare the generated response with the ground truth response and rate many of the appropriate responses as inappropriate if they deviate from the ground truth .", "one approach to resolve this problem is to consider the similarity of the generated response with the conversational context .", "in this paper , we propose an automatic evaluation model based on that idea and learn the model parameters from an unlabeled conversation corpus .", "our approach considers the speakers in defining the different levels of similar context .", "we use a twitter conversation corpus that contains many speakers and conversations to test our evaluation model .", "experiments show that our model outperforms the other existing evaluation metrics in terms of high correlation with human annotation scores .", "we also show that our model trained on twitter can be applied to movie dialogues without any additional training .", "we provide our code and the learned parameters so that they can be used for automatic evaluation of dialogue response generation models ."], "events": [{"event_type": "FAC", "arguments": [{"text": "twitter conversation corpus", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["twitter", "conversation", "corpus"], "offsets": [115, 116, 117]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [113]}}, {"event_type": "FIN", "arguments": [{"text": "applied", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["applied"], "offsets": [162]}, {"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": 
[151]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [153]}}, {"event_type": "FAC", "arguments": [{"text": "movie dialogues", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["movie", "dialogues"], "offsets": [164, 165]}, {"text": "without any additional training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "any", "additional", "training"], "offsets": [166, 167, 168, 169]}, {"text": "our model trained on twitter", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["automatic", "evaluation", "model", "trained", "on", "twitter"], "offsets": [80, 81, 82, 157, 158, 159]}], "trigger": {"text": "applied", "tokens": ["applied"], "offsets": [162]}}, {"event_type": "ITT", "arguments": [{"text": "automatic evaluation of open - domain dialogue response generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["automatic", "evaluation", "of", "open", "-", "domain", "dialogue", "response", "generation"], "offsets": [0, 1, 2, 3, 4, 5, 6, 7, 8]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [11]}}, {"event_type": "RWS", "arguments": [{"text": "existing evaluation models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "evaluation", "models"], "offsets": [23, 24, 25]}, {"text": "ground truth response", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["ground", "truth", "response"], "offsets": [33, 34, 35]}, {"text": "generated response", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["generated", "response"], "offsets": [29, 30]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [27]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [77]}, {"text": "automatic evaluation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["automatic", "evaluation", "model"], "offsets": [80, 81, 82]}, {"text": 
"consider", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["consider"], "offsets": [61]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [78]}}, {"event_type": "PUR", "arguments": [{"text": "similarity of the generated response with the conversational context", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["similarity", "of", "the", "generated", "response", "with", "the", "conversational", "context"], "offsets": [63, 64, 65, 66, 67, 68, 69, 70, 71]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [61]}}, {"event_type": "MDS", "arguments": [{"text": "unlabeled conversation corpus", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["unlabeled", "conversation", "corpus"], "offsets": [94, 95, 96]}, {"text": "model parameters", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["model", "parameters"], "offsets": [90, 91]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [88]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [112]}, {"text": "twitter conversation corpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["twitter", "conversation", "corpus"], "offsets": [115, 116, 117]}, {"text": "test", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["test"], "offsets": [125]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [113]}}, {"event_type": "PUR", "arguments": [{"text": "automatic evaluation model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["automatic", "evaluation", "model"], "offsets": [80, 81, 82]}], "trigger": {"text": "test", "tokens": ["test"], "offsets": [125]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [135]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [131]}}, {"event_type": "CMP", "arguments": 
[{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [135]}, {"text": "automatic evaluation model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["automatic", "evaluation", "model"], "offsets": [80, 81, 82]}, {"text": "other existing evaluation metrics", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["other", "existing", "evaluation", "metrics"], "offsets": [137, 138, 139, 140]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [135]}}], "document": ["automatic", "evaluation", "of", "open", "-", "domain", "dialogue", "response", "generation", "is", "very", "challenging", "because", "there", "are", "many", "appropriate", "responses", "for", "a", "given", "context", ".", "existing", "evaluation", "models", "merely", "compare", "the", "generated", "response", "with", "the", "ground", "truth", "response", "and", "rate", "many", "of", "the", "appropriate", "responses", "as", "inappropriate", "if", "they", "deviate", "from", "the", "ground", "truth", ".", "one", "approach", "to", "resolve", "this", "problem", "is", "to", "consider", "the", "similarity", "of", "the", "generated", "response", "with", "the", "conversational", "context", ".", "in", "this", "paper", ",", "we", "propose", "an", "automatic", "evaluation", "model", "based", "on", "that", "idea", "and", "learn", "the", "model", "parameters", "from", "an", "unlabeled", "conversation", "corpus", ".", "our", "approach", "considers", "the", "speakers", "in", "defining", "the", "different", "levels", "of", "similar", "context", ".", "we", "use", "a", "twitter", "conversation", "corpus", "that", "contains", "many", "speakers", "and", "conversations", "to", "test", "our", "evaluation", "model", ".", "experiments", "show", "that", "our", "model", "outperforms", "the", "other", "existing", "evaluation", "metrics", "in", "terms", "of", "high", "correlation", "with", "human", "annotation", "scores", ".", "we", "also", 
"show", "that", "our", "model", "trained", "on", "twitter", "can", "be", "applied", "to", "movie", "dialogues", "without", "any", "additional", "training", ".", "we", "provide", "our", "code", "and", "the", "learned", "parameters", "so", "that", "they", "can", "be", "used", "for", "automatic", "evaluation", "of", "dialogue", "response", "generation", "models", "."]}, {"venue": "ACL", "title": "Packed Levitated Marker for Entity and Relation Extraction", "abstract": "Recent entity and relation extraction works focus on investigating how to obtain a better span representation from the pre-trained encoder. However, a major limitation of existing works is that they ignore the interrelation between spans (pairs). In this work, we propose a novel span representation approach, named Packed Levitated Markers (PL-Marker), to consider the interrelation between the spans (pairs) by strategically packing the markers in the encoder. In particular, we propose a neighborhood-oriented packing strategy, which considers the neighbor spans integrally to better model the entity boundary information. Furthermore, for those more complicated span pair classification tasks, we design a subject-oriented packing strategy, which packs each subject and all its objects to model the interrelation between the same-subject span pairs. The experimental results show that, with the enhanced marker feature, our model advances baselines on six NER benchmarks, and obtains a 4.1%-4.3% strict relation F1 improvement with higher speed over previous state-of-the-art models on ACE04 and ACE05. 
Our code and models are publicly available at https://github.com/thunlp/PL-Marker", "doc_id": "d6af267b63cc21b8a4313a35774a141b", "publication_year": 2022, "sentences": ["recent entity and relation extraction works focus on investigating how to obtain a better span representation from the pre - trained encoder .", "however , a major limitation of existing works is that they ignore the interrelation between spans ( pairs ) .", "in this work , we propose a novel span representation approach , named packed levitated markers ( pl - marker ) , to consider the interrelation between the spans ( pairs ) by strategically packing the markers in the encoder .", "in particular , we propose a neighborhood - oriented packing strategy , which considers the neighbor spans integrally to better model the entity boundary information .", "furthermore , for those more complicated span pair classification tasks , we design a subject - oriented packing strategy , which packs each subject and all its objects to model the interrelation between the same - subject span pairs .", "the experimental results show that , with the enhanced marker feature , our model advances baselines on six ner benchmarks , and obtains a 4 . 1 % - 4 . 3 % strict relation f1 improvement with higher speed over previous state - of - the - art models on ace04 and ace05 .", "our code and models are publicly available at https : / / github . 
com / thunlp / pl - marker"], "events": [{"event_type": "ITT", "arguments": [{"text": "entity extraction works", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["entity", "extraction", "works"], "offsets": [1, 4, 5]}, {"text": "relation extraction works", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["relation", "extraction", "works"], "offsets": [3, 4, 5]}], "trigger": {"text": "focus", "tokens": ["focus"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "ignore", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ignore"], "offsets": [34]}, {"text": "limitation", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limitation"], "offsets": [27]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [34]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [47]}, {"text": "packed levitated markers", "nugget_type": "APP", "argument_type": "Content", "tokens": ["packed", "levitated", "markers"], "offsets": [56, 57, 58]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [48]}}, {"event_type": "MDS", "arguments": [{"text": "encoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["encoder"], "offsets": [82]}, {"text": "markers", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["markers"], "offsets": [79]}, {"text": "consider", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["consider"], "offsets": [66]}], "trigger": {"text": "strategically packing", "tokens": ["strategically", "packing"], "offsets": [76, 77]}}, {"event_type": "PUR", "arguments": [{"text": "interrelation between the spans", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["interrelation", "between", "the", "spans"], "offsets": [68, 69, 70, 71]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [66]}}, {"event_type": "PRP", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [87]}, {"text": "neighborhood - oriented packing strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neighborhood", "-", "oriented", "packing", "strategy"], "offsets": [90, 91, 92, 93, 94]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [88]}}, {"event_type": "WKS", "arguments": [{"text": "neighbor spans", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["neighbor", "spans"], "offsets": [99, 100]}, {"text": "better model", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["better", "model"], "offsets": [103, 104]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [87]}], "trigger": {"text": "considers", "tokens": ["considers"], "offsets": [97]}}, {"event_type": "PUR", "arguments": [{"text": "entity boundary information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["entity", "boundary", "information"], "offsets": [106, 107, 108]}], "trigger": {"text": "better model", "tokens": ["better", "model"], "offsets": [103, 104]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [121]}, {"text": "subject - oriented packing strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["subject", "-", "oriented", "packing", "strategy"], "offsets": [124, 125, 126, 127, 128]}, {"text": "more complicated span pair classification tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["more", "complicated", "span", "pair", "classification", "tasks"], "offsets": [114, 115, 116, 117, 118, 119]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [122]}}, {"event_type": "MDS", "arguments": [{"text": "each subject and all its objects", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "subject", "and", "all", "subject", "objects"], "offsets": 
[132, 133, 134, 135, 133, 137]}, {"text": "model", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["model"], "offsets": [139]}], "trigger": {"text": "packs", "tokens": ["packs"], "offsets": [131]}}, {"event_type": "CMP", "arguments": [{"text": "speed", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["speed"], "offsets": [189]}, {"text": "previous state - of - the - art models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [191, 192, 193, 194, 195, 196, 197, 198, 199]}, {"text": "packed levitated markers", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["packed", "levitated", "markers"], "offsets": [56, 57, 58]}, {"text": "4 . 1 % - 4 . 3 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["4", ".", "1", "%", "-", "4", ".", "3", "%"], "offsets": [174, 175, 176, 177, 178, 179, 180, 181, 182]}, {"text": "strict relation f1 improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["strict", "relation", "f1", "improvement"], "offsets": [183, 184, 185, 186]}, {"text": "higher", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher"], "offsets": [188]}, {"text": "ace04", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ace04"], "offsets": [201]}, {"text": "ace05", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ace05"], "offsets": [203]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [172]}}, {"event_type": "PUR", "arguments": [{"text": "interrelation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["interrelation"], "offsets": [141]}, {"text": "between the same - subject span pairs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "the", "same", "-", "subject", "span", "pairs"], "offsets": [142, 143, 144, 145, 146, 147, 148]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [139]}}, {"event_type": "FIN", 
"arguments": [{"text": "obtains", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["obtains"], "offsets": [172]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [153]}}, {"event_type": "CMP", "arguments": [{"text": "with the enhanced marker feature", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "the", "enhanced", "marker", "feature"], "offsets": [156, 157, 158, 159, 160]}, {"text": "advances", "nugget_type": "STR", "argument_type": "Result", "tokens": ["advances"], "offsets": [164]}, {"text": "our model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["our", "model"], "offsets": [162, 163]}, {"text": "baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baselines"], "offsets": [165]}], "trigger": {"text": "advances", "tokens": ["advances"], "offsets": [164]}}], "document": ["recent", "entity", "and", "relation", "extraction", "works", "focus", "on", "investigating", "how", "to", "obtain", "a", "better", "span", "representation", "from", "the", "pre", "-", "trained", "encoder", ".", "however", ",", "a", "major", "limitation", "of", "existing", "works", "is", "that", "they", "ignore", "the", "interrelation", "between", "spans", "(", "pairs", ")", ".", "in", "this", "work", ",", "we", "propose", "a", "novel", "span", "representation", "approach", ",", "named", "packed", "levitated", "markers", "(", "pl", "-", "marker", ")", ",", "to", "consider", "the", "interrelation", "between", "the", "spans", "(", "pairs", ")", "by", "strategically", "packing", "the", "markers", "in", "the", "encoder", ".", "in", "particular", ",", "we", "propose", "a", "neighborhood", "-", "oriented", "packing", "strategy", ",", "which", "considers", "the", "neighbor", "spans", "integrally", "to", "better", "model", "the", "entity", "boundary", "information", ".", "furthermore", ",", "for", "those", "more", "complicated", "span", "pair", "classification", "tasks", ",", "we", "design", "a", "subject", "-", 
"oriented", "packing", "strategy", ",", "which", "packs", "each", "subject", "and", "all", "its", "objects", "to", "model", "the", "interrelation", "between", "the", "same", "-", "subject", "span", "pairs", ".", "the", "experimental", "results", "show", "that", ",", "with", "the", "enhanced", "marker", "feature", ",", "our", "model", "advances", "baselines", "on", "six", "ner", "benchmarks", ",", "and", "obtains", "a", "4", ".", "1", "%", "-", "4", ".", "3", "%", "strict", "relation", "f1", "improvement", "with", "higher", "speed", "over", "previous", "state", "-", "of", "-", "the", "-", "art", "models", "on", "ace04", "and", "ace05", ".", "our", "code", "and", "models", "are", "publicly", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "thunlp", "/", "pl", "-", "marker"]}, {"venue": "ACL", "title": "Enhancing Descriptive Image Captioning with Natural Language Inference", "abstract": "Generating descriptive sentences that convey non-trivial, detailed, and salient information about images is an important goal of image captioning. In this paper we propose a novel approach to encourage captioning models to produce more detailed captions using natural language inference, based on the motivation that, among different captions of an image, descriptive captions are more likely to entail less descriptive captions. Specifically, we construct directed inference graphs for reference captions based on natural language inference. A PageRank algorithm is then employed to estimate the descriptiveness score of each node. Built on that, we use reference sampling and weighted designated rewards to guide captioning to generate descriptive captions. 
The results on MSCOCO show that the proposed method outperforms the baselines significantly on a wide range of conventional and descriptiveness-related evaluation metrics.", "doc_id": "959a45d13a6d4f32b15ecdca509186f6", "publication_year": 2021, "sentences": ["generating descriptive sentences that convey non - trivial , detailed , and salient information about images is an important goal of image captioning .", "in this paper we propose a novel approach to encourage captioning models to produce more detailed captions using natural language inference , based on the motivation that , among different captions of an image , descriptive captions are more likely to entail less descriptive captions .", "specifically , we construct directed inference graphs for reference captions based on natural language inference .", "a pagerank algorithm is then employed to estimate the descriptiveness score of each node .", "built on that , we use reference sampling and weighted designated rewards to guide captioning to generate descriptive captions .", "the results on mscoco show that the proposed method outperforms the baselines significantly on a wide range of conventional and descriptiveness - related evaluation metrics ."], "events": [{"event_type": "ITT", "arguments": [{"text": "image captioning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["image", "captioning"], "offsets": [21, 22]}], "trigger": {"text": "important", "tokens": ["important"], "offsets": [18]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [27]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [31]}, {"text": "encourage", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encourage"], "offsets": [33]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [28]}}, {"event_type": "PUR", "arguments": [{"text": "captioning models to 
produce more detailed captions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["captioning", "models", "to", "produce", "more", "detailed", "captions"], "offsets": [34, 35, 36, 37, 38, 39, 40]}], "trigger": {"text": "encourage", "tokens": ["encourage"], "offsets": [33]}}, {"event_type": "MDS", "arguments": [{"text": "natural language inference", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["natural", "language", "inference"], "offsets": [42, 43, 44]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [41]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [72]}, {"text": "directed inference graphs", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["directed", "inference", "graphs"], "offsets": [74, 75, 76]}, {"text": "reference captions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["reference", "captions"], "offsets": [78, 79]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [73]}}, {"event_type": "WKS", "arguments": [{"text": "pagerank algorithm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pagerank", "algorithm"], "offsets": [87, 88]}, {"text": "estimate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["estimate"], "offsets": [93]}], "trigger": {"text": "employed", "tokens": ["employed"], "offsets": [91]}}, {"event_type": "PUR", "arguments": [{"text": "descriptiveness score of each node", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["descriptiveness", "score", "of", "each", "node"], "offsets": [95, 96, 97, 98, 99]}], "trigger": {"text": "estimate", "tokens": ["estimate"], "offsets": [93]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [105]}, {"text": "reference sampling", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["reference", 
"sampling"], "offsets": [107, 108]}, {"text": "weighted designated rewards", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["weighted", "designated", "rewards"], "offsets": [110, 111, 112]}, {"text": "guide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["guide"], "offsets": [114]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [106]}}, {"event_type": "PUR", "arguments": [{"text": "captioning to generate descriptive captions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["captioning", "to", "generate", "descriptive", "captions"], "offsets": [115, 116, 117, 118, 119]}], "trigger": {"text": "guide", "tokens": ["guide"], "offsets": [114]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [130]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [125]}}, {"event_type": "CMP", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["approach"], "offsets": [31]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [130]}, {"text": "baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baselines"], "offsets": [132]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [133]}, {"text": "wide range of conventional metrics", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["wide", "range", "of", "conventional", "metrics"], "offsets": [136, 137, 138, 139, 145]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [130]}}], "document": ["generating", "descriptive", "sentences", "that", "convey", "non", "-", "trivial", ",", "detailed", ",", "and", "salient", "information", "about", "images", "is", "an", "important", "goal", "of", "image", "captioning", ".", "in", "this", "paper", "we", "propose", "a", 
"novel", "approach", "to", "encourage", "captioning", "models", "to", "produce", "more", "detailed", "captions", "using", "natural", "language", "inference", ",", "based", "on", "the", "motivation", "that", ",", "among", "different", "captions", "of", "an", "image", ",", "descriptive", "captions", "are", "more", "likely", "to", "entail", "less", "descriptive", "captions", ".", "specifically", ",", "we", "construct", "directed", "inference", "graphs", "for", "reference", "captions", "based", "on", "natural", "language", "inference", ".", "a", "pagerank", "algorithm", "is", "then", "employed", "to", "estimate", "the", "descriptiveness", "score", "of", "each", "node", ".", "built", "on", "that", ",", "we", "use", "reference", "sampling", "and", "weighted", "designated", "rewards", "to", "guide", "captioning", "to", "generate", "descriptive", "captions", ".", "the", "results", "on", "mscoco", "show", "that", "the", "proposed", "method", "outperforms", "the", "baselines", "significantly", "on", "a", "wide", "range", "of", "conventional", "and", "descriptiveness", "-", "related", "evaluation", "metrics", "."]}, {"venue": "ACL", "title": "Poisoning Knowledge Graph Embeddings via Relation Inference Patterns", "abstract": "We study the problem of generating data poisoning attacks against Knowledge Graph Embedding (KGE) models for the task of link prediction in knowledge graphs. To poison KGE models, we propose to exploit their inductive abilities which are captured through the relationship patterns like symmetry, inversion and composition in the knowledge graph. Specifically, to degrade the model\u2019s prediction confidence on target facts, we propose to improve the model\u2019s prediction confidence on a set of decoy facts. Thus, we craft adversarial additions that can improve the model\u2019s prediction confidence on decoy facts through different inference patterns. 
Our experiments demonstrate that the proposed poisoning attacks outperform state-of-art baselines on four KGE models for two publicly available datasets. We also find that the symmetry pattern based attacks generalize across all model-dataset combinations which indicates the sensitivity of KGE models to this pattern.", "doc_id": "f8f37767d0d1ee68364fc26d42e77cde", "publication_year": 2021, "sentences": ["we study the problem of generating data poisoning attacks against knowledge graph embedding ( kge ) models for the task of link prediction in knowledge graphs .", "to poison kge models , we propose to exploit their inductive abilities which are captured through the relationship patterns like symmetry , inversion and composition in the knowledge graph .", "specifically , to degrade the model \u2019 s prediction confidence on target facts , we propose to improve the model \u2019 s prediction confidence on a set of decoy facts .", "thus , we craft adversarial additions that can improve the model \u2019 s prediction confidence on decoy facts through different inference patterns .", "our experiments demonstrate that the proposed poisoning attacks outperform state - of - art baselines on four kge models for two publicly available datasets .", "we also find that the symmetry pattern based attacks generalize across all model - dataset combinations which indicates the sensitivity of kge models to this pattern ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "problem of generating data poisoning attacks", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem", "of", "generating", "data", "poisoning", "attacks"], "offsets": [3, 4, 5, 6, 7, 8]}, {"text": "against knowledge graph embedding ( kge ) models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["against", "knowledge", "graph", "embedding", "(", "kge", ")", "models"], 
"offsets": [9, 10, 11, 12, 13, 14, 15, 16]}, {"text": "task of link prediction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["task", "of", "link", "prediction"], "offsets": [19, 20, 21, 22]}, {"text": "in knowledge graphs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "knowledge", "graphs"], "offsets": [23, 24, 25]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [1]}}, {"event_type": "MDS", "arguments": [{"text": "inductive abilities", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["inductive", "abilities"], "offsets": [37, 38]}, {"text": "relationship patterns", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relationship", "patterns"], "offsets": [44, 45]}, {"text": "in the knowledge graph", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "knowledge", "graph"], "offsets": [52, 53, 54, 55]}, {"text": "poison", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["poison"], "offsets": [28]}], "trigger": {"text": "captured", "tokens": ["captured"], "offsets": [41]}}, {"event_type": "PUR", "arguments": [{"text": "kge models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["knowledge", "graph", "embedding", "models"], "offsets": [10, 11, 12, 30]}], "trigger": {"text": "poison", "tokens": ["poison"], "offsets": [28]}}, {"event_type": "MDS", "arguments": [{"text": "degrade", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["degrade"], "offsets": [60]}, {"text": "model \u2019 s prediction confidence", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["model", "\u2019", "s", "prediction", "confidence"], "offsets": [76, 77, 78, 79, 80]}, {"text": "set of decoy facts", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["set", "of", "decoy", "facts"], "offsets": [83, 84, 85, 86]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [74]}}, {"event_type": "PUR", "arguments": 
[{"text": "model \u2019 s prediction confidence", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["model", "\u2019", "s", "prediction", "confidence"], "offsets": [62, 63, 64, 65, 66]}, {"text": "on target facts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "target", "facts"], "offsets": [67, 68, 69]}], "trigger": {"text": "degrade", "tokens": ["degrade"], "offsets": [60]}}, {"event_type": "MDS", "arguments": [{"text": "adversarial additions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["adversarial", "additions"], "offsets": [92, 93]}, {"text": "different inference patterns", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["different", "inference", "patterns"], "offsets": [107, 108, 109]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [96]}], "trigger": {"text": "craft", "tokens": ["craft"], "offsets": [91]}}, {"event_type": "PUR", "arguments": [{"text": "model \u2019 s prediction confidence", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["model", "\u2019", "s", "prediction", "confidence"], "offsets": [98, 99, 100, 101, 102]}, {"text": "on decoy facts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "decoy", "facts"], "offsets": [103, 104, 105]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [96]}}, {"event_type": "CMP", "arguments": [{"text": "state - of - art baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "art", "baselines"], "offsets": [120, 121, 122, 123, 124, 125]}, {"text": "proposed poisoning attacks", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["proposed", "poisoning", "attacks"], "offsets": [116, 117, 118]}, {"text": "on four kge models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "four", "knowledge", "graph", "embedding", "models"], "offsets": [126, 127, 10, 11, 12, 129]}, {"text": 
"two publicly available datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "publicly", "available", "datasets"], "offsets": [131, 132, 133, 134]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [119]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [136]}, {"text": "generalize", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["generalize"], "offsets": [145]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [138]}}, {"event_type": "FAC", "arguments": [{"text": "symmetry pattern based attacks", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["symmetry", "pattern", "based", "attacks"], "offsets": [141, 142, 143, 144]}, {"text": "model - dataset combinations", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["model", "-", "dataset", "combinations"], "offsets": [148, 149, 150, 151]}], "trigger": {"text": "generalize", "tokens": ["generalize"], "offsets": [145]}}, {"event_type": "FAC", "arguments": [{"text": "sensitivity of kge models", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["sensitivity", "of", "knowledge", "graph", "embedding", "models"], "offsets": [155, 156, 10, 11, 12, 158]}], "trigger": {"text": "indicates", "tokens": ["indicates"], "offsets": [153]}}, {"event_type": "FIN", "arguments": [{"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], "offsets": [119]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [113]}}], "document": ["we", "study", "the", "problem", "of", "generating", "data", "poisoning", "attacks", "against", "knowledge", "graph", "embedding", "(", "kge", ")", "models", "for", "the", "task", "of", "link", "prediction", "in", "knowledge", "graphs", ".", "to", "poison", "kge", "models", ",", "we", "propose", "to", "exploit", "their", "inductive", "abilities", "which", "are", 
"captured", "through", "the", "relationship", "patterns", "like", "symmetry", ",", "inversion", "and", "composition", "in", "the", "knowledge", "graph", ".", "specifically", ",", "to", "degrade", "the", "model", "\u2019", "s", "prediction", "confidence", "on", "target", "facts", ",", "we", "propose", "to", "improve", "the", "model", "\u2019", "s", "prediction", "confidence", "on", "a", "set", "of", "decoy", "facts", ".", "thus", ",", "we", "craft", "adversarial", "additions", "that", "can", "improve", "the", "model", "\u2019", "s", "prediction", "confidence", "on", "decoy", "facts", "through", "different", "inference", "patterns", ".", "our", "experiments", "demonstrate", "that", "the", "proposed", "poisoning", "attacks", "outperform", "state", "-", "of", "-", "art", "baselines", "on", "four", "kge", "models", "for", "two", "publicly", "available", "datasets", ".", "we", "also", "find", "that", "the", "symmetry", "pattern", "based", "attacks", "generalize", "across", "all", "model", "-", "dataset", "combinations", "which", "indicates", "the", "sensitivity", "of", "kge", "models", "to", "this", "pattern", "."]}, {"venue": "ACL", "title": "Context-specific Language Modeling for Human Trafficking Detection from Online Advertisements", "abstract": "Human trafficking is a worldwide crisis. Traffickers exploit their victims by anonymously offering sexual services through online advertisements. These ads often contain clues that law enforcement can use to separate out potential trafficking cases from volunteer sex advertisements. The problem is that the sheer volume of ads is too overwhelming for manual processing. Ideally, a centralized semi-automated tool can be used to assist law enforcement agencies with this task. Here, we present an approach using natural language processing to identify trafficking ads on these websites. 
We propose a classifier by integrating multiple text feature sets, including the publicly available pre-trained textual language model Bi-directional Encoder Representation from transformers (BERT). In this paper, we demonstrate that a classifier using this composite feature set has significantly better performance compared to any single feature set alone.", "doc_id": "250d544590945c4bd7b5a8da6c19302d", "publication_year": 2019, "sentences": ["human trafficking is a worldwide crisis .", "traffickers exploit their victims by anonymously offering sexual services through online advertisements .", "these ads often contain clues that law enforcement can use to separate out potential trafficking cases from volunteer sex advertisements .", "the problem is that the sheer volume of ads is too overwhelming for manual processing .", "ideally , a centralized semi - automated tool can be used to assist law enforcement agencies with this task .", "here , we present an approach using natural language processing to identify trafficking ads on these websites .", "we propose a classifier by integrating multiple text feature sets , including the publicly available pre - trained textual language model bi - directional encoder representation from transformers ( bert ) .", "in this paper , we demonstrate that a classifier using this composite feature set has significantly better performance compared to any single feature set alone ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [79]}, {"text": "approach using natural language processing", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach", "using", "natural", "language", "processing"], "offsets": [82, 83, 84, 85, 86]}, {"text": "identify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["identify"], "offsets": [88]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [80]}}, {"event_type": 
"PUR", "arguments": [{"text": "trafficking ads on these websites", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["trafficking", "ads", "on", "these", "websites"], "offsets": [89, 90, 91, 92, 93]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [88]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [95]}, {"text": "classifier", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["classifier"], "offsets": [98]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [96]}}, {"event_type": "MDS", "arguments": [{"text": "multiple text feature sets", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["multiple", "text", "feature", "sets"], "offsets": [101, 102, 103, 104]}], "trigger": {"text": "integrating", "tokens": ["integrating"], "offsets": [100]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [131]}, {"text": "has", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["has"], "offsets": [141]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [132]}}, {"event_type": "CMP", "arguments": [{"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [142]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [144]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [143]}, {"text": "any single feature set alone", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["any", "single", "feature", "set", "alone"], "offsets": [147, 148, 149, 150, 151]}, {"text": "composite feature set", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["composite", "feature", "set"], "offsets": [138, 139, 140]}], "trigger": {"text": "has", "tokens": 
["has"], "offsets": [141]}}], "document": ["human", "trafficking", "is", "a", "worldwide", "crisis", ".", "traffickers", "exploit", "their", "victims", "by", "anonymously", "offering", "sexual", "services", "through", "online", "advertisements", ".", "these", "ads", "often", "contain", "clues", "that", "law", "enforcement", "can", "use", "to", "separate", "out", "potential", "trafficking", "cases", "from", "volunteer", "sex", "advertisements", ".", "the", "problem", "is", "that", "the", "sheer", "volume", "of", "ads", "is", "too", "overwhelming", "for", "manual", "processing", ".", "ideally", ",", "a", "centralized", "semi", "-", "automated", "tool", "can", "be", "used", "to", "assist", "law", "enforcement", "agencies", "with", "this", "task", ".", "here", ",", "we", "present", "an", "approach", "using", "natural", "language", "processing", "to", "identify", "trafficking", "ads", "on", "these", "websites", ".", "we", "propose", "a", "classifier", "by", "integrating", "multiple", "text", "feature", "sets", ",", "including", "the", "publicly", "available", "pre", "-", "trained", "textual", "language", "model", "bi", "-", "directional", "encoder", "representation", "from", "transformers", "(", "bert", ")", ".", "in", "this", "paper", ",", "we", "demonstrate", "that", "a", "classifier", "using", "this", "composite", "feature", "set", "has", "significantly", "better", "performance", "compared", "to", "any", "single", "feature", "set", "alone", "."]}, {"venue": "ACL", "title": "ExpBERT: Representation Engineering with Natural Language Explanations", "abstract": "Suppose we want to specify the inductive bias that married couples typically go on honeymoons for the task of extracting pairs of spouses from text. In this paper, we allow model developers to specify these types of inductive biases as natural language explanations. 
We use BERT fine-tuned on MultiNLI to \u201cinterpret\u201d these explanations with respect to the input sentence, producing explanation-guided representations of the input. Across three relation extraction tasks, our method, ExpBERT, matches a BERT baseline but with 3\u201320x less labeled data and improves on the baseline by 3\u201310 F1 points with the same amount of labeled data.", "doc_id": "09acccc05c621d0c5ca077b7304b0c6a", "publication_year": 2020, "sentences": ["suppose we want to specify the inductive bias that married couples typically go on honeymoons for the task of extracting pairs of spouses from text .", "in this paper , we allow model developers to specify these types of inductive biases as natural language explanations .", "we use bert fine - tuned on multinli to \u201c interpret \u201d these explanations with respect to the input sentence , producing explanation - guided representations of the input .", "across three relation extraction tasks , our method , expbert , matches a bert baseline but with 3 \u2013 20x less labeled data and improves on the baseline by 3 \u2013 10 f1 points with the same amount of labeled data ."], "events": [{"event_type": "ITT", "arguments": [{"text": "inductive bias", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["inductive", "bias"], "offsets": [6, 7]}], "trigger": {"text": "specify", "tokens": ["specify"], "offsets": [4]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [46]}, {"text": "bert fine - tuned on multinli", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bert", "fine", "-", "tuned", "on", "multinli"], "offsets": [48, 49, 50, 51, 52, 53]}, {"text": "interpret", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["interpret"], "offsets": [56]}, {"text": "producing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["producing"], "offsets": [67]}], "trigger": {"text": "use", 
"tokens": ["use"], "offsets": [47]}}, {"event_type": "PUR", "arguments": [{"text": "explanations with respect to the input sentence", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["explanations", "with", "respect", "to", "the", "input", "sentence"], "offsets": [59, 60, 61, 62, 63, 64, 65]}], "trigger": {"text": "interpret", "tokens": ["interpret"], "offsets": [56]}}, {"event_type": "PUR", "arguments": [{"text": "explanation - guided representations of the input", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["explanation", "-", "guided", "representations", "of", "the", "input"], "offsets": [68, 69, 70, 71, 72, 73, 74]}], "trigger": {"text": "producing", "tokens": ["producing"], "offsets": [67]}}, {"event_type": "FAC", "arguments": [{"text": "expbert", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["expbert"], "offsets": [85]}, {"text": "bert baseline", "nugget_type": "APP", "argument_type": "Object", "tokens": ["bert", "baseline"], "offsets": [89, 90]}, {"text": "3 \u2013 20x less labeled data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["3", "\u2013", "20x", "less", "labeled", "data"], "offsets": [93, 94, 95, 96, 97, 98]}, {"text": "across three relation extraction tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "three", "relation", "extraction", "tasks"], "offsets": [76, 77, 78, 79, 80]}], "trigger": {"text": "matches", "tokens": ["matches"], "offsets": [87]}}, {"event_type": "CMP", "arguments": [{"text": "expbert", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["expbert"], "offsets": [85]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [100]}, {"text": "baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baseline"], "offsets": [103]}, {"text": "3 \u2013 10", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["3", "\u2013", "10"], "offsets": [105, 106, 107]}, {"text": "f1", 
"nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1"], "offsets": [108]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [100]}}], "document": ["suppose", "we", "want", "to", "specify", "the", "inductive", "bias", "that", "married", "couples", "typically", "go", "on", "honeymoons", "for", "the", "task", "of", "extracting", "pairs", "of", "spouses", "from", "text", ".", "in", "this", "paper", ",", "we", "allow", "model", "developers", "to", "specify", "these", "types", "of", "inductive", "biases", "as", "natural", "language", "explanations", ".", "we", "use", "bert", "fine", "-", "tuned", "on", "multinli", "to", "\u201c", "interpret", "\u201d", "these", "explanations", "with", "respect", "to", "the", "input", "sentence", ",", "producing", "explanation", "-", "guided", "representations", "of", "the", "input", ".", "across", "three", "relation", "extraction", "tasks", ",", "our", "method", ",", "expbert", ",", "matches", "a", "bert", "baseline", "but", "with", "3", "\u2013", "20x", "less", "labeled", "data", "and", "improves", "on", "the", "baseline", "by", "3", "\u2013", "10", "f1", "points", "with", "the", "same", "amount", "of", "labeled", "data", "."]}, {"venue": "ACL", "title": "Reducing Gender Bias in Neural Machine Translation as a Domain Adaptation Problem", "abstract": "Training data for NLP tasks often exhibits gender bias in that fewer sentences refer to women than to men. In Neural Machine Translation (NMT) gender bias has been shown to reduce translation quality, particularly when the target language has grammatical gender. The recent WinoMT challenge set allows us to measure this effect directly (Stanovsky et al, 2019) Ideally we would reduce system bias by simply debiasing all data prior to training, but achieving this effectively is itself a challenge. Rather than attempt to create a \u2018balanced\u2019 dataset, we use transfer learning on a small set of trusted, gender-balanced examples. 
This approach gives strong and consistent improvements in gender debiasing with much less computational cost than training from scratch. A known pitfall of transfer learning on new domains is \u2018catastrophic forgetting\u2019, which we address at adaptation and inference time. During adaptation we show that Elastic Weight Consolidation allows a performance trade-off between general translation quality and bias reduction. At inference time we propose a lattice-rescoring scheme which outperforms all systems evaluated in Stanovsky et al, 2019 on WinoMT with no degradation of general test set BLEU. We demonstrate our approach translating from English into three languages with varied linguistic properties and data availability.", "doc_id": "977ad1891fa234b684f6c8e97161cb0a", "publication_year": 2020, "sentences": ["training data for nlp tasks often exhibits gender bias in that fewer sentences refer to women than to men .", "in neural machine translation ( nmt ) gender bias has been shown to reduce translation quality , particularly when the target language has grammatical gender .", "the recent winomt challenge set allows us to measure this effect directly ( stanovsky et al , 2019 )", "ideally we would reduce system bias by simply debiasing all data prior to training , but achieving this effectively is itself a challenge .", "rather than attempt to create a \u2018 balanced \u2019 dataset , we use transfer learning on a small set of trusted , gender - balanced examples .", "this approach gives strong and consistent improvements in gender debiasing with much less computational cost than training from scratch .", "a known pitfall of transfer learning on new domains is \u2018 catastrophic forgetting \u2019 , which we address at adaptation and inference time .", "during adaptation we show that elastic weight consolidation allows a performance trade - off between general translation quality and bias reduction .", "at inference time we propose a lattice - rescoring scheme 
which outperforms all systems evaluated in stanovsky et al , 2019 on winomt with no degradation of general test set bleu .", "we demonstrate our approach translating from english into three languages with varied linguistic properties and data availability ."], "events": [{"event_type": "ITT", "arguments": [{"text": "gender bias", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["gender", "bias"], "offsets": [7, 8]}], "trigger": {"text": "exhibits", "tokens": ["exhibits"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "reduce", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["reduce"], "offsets": [33]}, {"text": "translation quality", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["translation", "quality"], "offsets": [34, 35]}], "trigger": {"text": "reduce", "tokens": ["reduce"], "offsets": [33]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [100]}, {"text": "transfer learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["transfer", "learning"], "offsets": [102, 103]}, {"text": "small set of trusted , gender - balanced examples", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["small", "set", "of", "trusted", ",", "gender", "-", "balanced", "examples"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113, 114]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [101]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [152]}, {"text": "catastrophic forgetting", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["catastrophic", "forgetting"], "offsets": [147, 148]}, {"text": "at adaptation and inference time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "adaptation", "and", "inference", "time"], "offsets": [154, 155, 156, 157, 158]}], "trigger": {"text": "address", "tokens": 
["address"], "offsets": [153]}}, {"event_type": "FIN", "arguments": [{"text": "allows", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["allows"], "offsets": [168]}, {"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [162]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [163]}}, {"event_type": "FAC", "arguments": [{"text": "elastic weight consolidation", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["elastic", "weight", "consolidation"], "offsets": [165, 166, 167]}, {"text": "performance trade - off", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "trade", "-", "off"], "offsets": [170, 171, 172, 173]}, {"text": "between general translation quality and bias reduction", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "general", "translation", "quality", "and", "bias", "reduction"], "offsets": [174, 175, 176, 177, 178, 179, 180]}], "trigger": {"text": "allows", "tokens": ["allows"], "offsets": [168]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [185]}, {"text": "lattice - rescoring scheme", "nugget_type": "APP", "argument_type": "Content", "tokens": ["lattice", "-", "rescoring", "scheme"], "offsets": [188, 189, 190, 191]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [186]}}, {"event_type": "CMP", "arguments": [{"text": "lattice - rescoring scheme", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["lattice", "-", "rescoring", "scheme"], "offsets": [188, 189, 190, 191]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [193]}, {"text": "all systems evaluated in stanovsky et al , 2019", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["all", "systems", "evaluated", "in", "stanovsky", "et", "al", ",", "2019"], "offsets": [194, 195, 196, 197, 198, 
199, 200, 201, 202]}, {"text": "on winomt", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "winomt"], "offsets": [203, 204]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [193]}}, {"event_type": "FAC", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach"], "offsets": [217]}, {"text": "translating from english into three languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["translating", "from", "english", "into", "three", "languages"], "offsets": [218, 219, 220, 221, 222, 223]}, {"text": "varied linguistic properties", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["varied", "linguistic", "properties"], "offsets": [225, 226, 227]}, {"text": "data availability", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["data", "availability"], "offsets": [229, 230]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [215]}}], "document": ["training", "data", "for", "nlp", "tasks", "often", "exhibits", "gender", "bias", "in", "that", "fewer", "sentences", "refer", "to", "women", "than", "to", "men", ".", "in", "neural", "machine", "translation", "(", "nmt", ")", "gender", "bias", "has", "been", "shown", "to", "reduce", "translation", "quality", ",", "particularly", "when", "the", "target", "language", "has", "grammatical", "gender", ".", "the", "recent", "winomt", "challenge", "set", "allows", "us", "to", "measure", "this", "effect", "directly", "(", "stanovsky", "et", "al", ",", "2019", ")", "ideally", "we", "would", "reduce", "system", "bias", "by", "simply", "debiasing", "all", "data", "prior", "to", "training", ",", "but", "achieving", "this", "effectively", "is", "itself", "a", "challenge", ".", "rather", "than", "attempt", "to", "create", "a", "\u2018", "balanced", "\u2019", "dataset", ",", "we", "use", "transfer", "learning", "on", "a", "small", "set", "of", "trusted", ",", "gender", "-", 
"balanced", "examples", ".", "this", "approach", "gives", "strong", "and", "consistent", "improvements", "in", "gender", "debiasing", "with", "much", "less", "computational", "cost", "than", "training", "from", "scratch", ".", "a", "known", "pitfall", "of", "transfer", "learning", "on", "new", "domains", "is", "\u2018", "catastrophic", "forgetting", "\u2019", ",", "which", "we", "address", "at", "adaptation", "and", "inference", "time", ".", "during", "adaptation", "we", "show", "that", "elastic", "weight", "consolidation", "allows", "a", "performance", "trade", "-", "off", "between", "general", "translation", "quality", "and", "bias", "reduction", ".", "at", "inference", "time", "we", "propose", "a", "lattice", "-", "rescoring", "scheme", "which", "outperforms", "all", "systems", "evaluated", "in", "stanovsky", "et", "al", ",", "2019", "on", "winomt", "with", "no", "degradation", "of", "general", "test", "set", "bleu", ".", "we", "demonstrate", "our", "approach", "translating", "from", "english", "into", "three", "languages", "with", "varied", "linguistic", "properties", "and", "data", "availability", "."]}, {"venue": "ACL", "title": "Discriminative Reranking for Neural Machine Translation", "abstract": "Reranking models enable the integration of rich features to select a better output hypothesis within an n-best list or lattice. These models have a long history in NLP, and we revisit discriminative reranking for modern neural machine translation models by training a large transformer architecture. This takes as input both the source sentence as well as a list of hypotheses to output a ranked list. The reranker is trained to predict the observed distribution of a desired metric, e.g. BLEU, over the n-best list. Since such a discriminator contains hundreds of millions of parameters, we improve its generalization using pre-training and data augmentation techniques. 
Experiments on four WMT directions show that our discriminative reranking approach is effective and complementary to existing generative reranking approaches, yielding improvements of up to 4 BLEU over the beam search output.", "doc_id": "b0b0b733e89d13d29533cbb91208ef93", "publication_year": 2021, "sentences": ["reranking models enable the integration of rich features to select a better output hypothesis within an n - best list or lattice .", "these models have a long history in nlp , and we revisit discriminative reranking for modern neural machine translation models by training a large transformer architecture .", "this takes as input both the source sentence as well as a list of hypotheses to output a ranked list .", "the reranker is trained to predict the observed distribution of a desired metric , e . g . bleu , over the n - best list .", "since such a discriminator contains hundreds of millions of parameters , we improve its generalization using pre - training and data augmentation techniques .", "experiments on four wmt directions show that our discriminative reranking approach is effective and complementary to existing generative reranking approaches , yielding improvements of up to 4 bleu over the beam search output ."], "events": [{"event_type": "ITT", "arguments": [{"text": "reranking models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["reranking", "models"], "offsets": [0, 1]}], "trigger": {"text": "enable", "tokens": ["enable"], "offsets": [2]}}, {"event_type": "MDS", "arguments": [{"text": "large transformer architecture", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["large", "transformer", "architecture"], "offsets": [46, 47, 48]}, {"text": "revisit", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["revisit"], "offsets": [34]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [44]}}, {"event_type": "PUR", "arguments": [{"text": "discriminative reranking for modern neural machine 
translation models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["discriminative", "reranking", "for", "modern", "neural", "machine", "translation", "models"], "offsets": [35, 36, 37, 38, 39, 40, 41, 42]}], "trigger": {"text": "revisit", "tokens": ["revisit"], "offsets": [34]}}, {"event_type": "MDS", "arguments": [{"text": "source sentence", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["source", "sentence"], "offsets": [56, 57]}, {"text": "list of hypotheses", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["list", "of", "hypotheses"], "offsets": [62, 63, 64]}, {"text": "output", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["output"], "offsets": [66]}], "trigger": {"text": "input", "tokens": ["input"], "offsets": [53]}}, {"event_type": "PUR", "arguments": [{"text": "ranked list", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["ranked", "list"], "offsets": [68, 69]}], "trigger": {"text": "output", "tokens": ["output"], "offsets": [66]}}, {"event_type": "MDS", "arguments": [{"text": "reranker", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["reranker"], "offsets": [72]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [76]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [74]}}, {"event_type": "PUR", "arguments": [{"text": "observed distribution of a desired metric", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["observed", "distribution", "of", "a", "desired", "metric"], "offsets": [78, 79, 80, 81, 82, 83]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [76]}}, {"event_type": "MDS", "arguments": [{"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [110]}, {"text": "pre - training", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pre", "-", "training"], "offsets": [114, 115, 116]}, {"text": 
"data augmentation techniques", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["data", "augmentation", "techniques"], "offsets": [118, 119, 120]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [113]}}, {"event_type": "PUR", "arguments": [{"text": "its generalization", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["discriminator", "generalization"], "offsets": [101, 112]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [110]}}, {"event_type": "FAC", "arguments": [{"text": "our discriminative reranking approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["our", "discriminative", "reranking", "approach"], "offsets": [129, 130, 131, 132]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "our discriminative reranking approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["our", "discriminative", "reranking", "approach"], "offsets": [129, 130, 131, 132]}, {"text": "existing generative reranking approaches", "nugget_type": "APP", "argument_type": "Object", "tokens": ["existing", "generative", "reranking", "approaches"], "offsets": [138, 139, 140, 141]}], "trigger": {"text": "complementary", "tokens": ["complementary"], "offsets": [136]}}, {"event_type": "CMP", "arguments": [{"text": "our discriminative reranking approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["our", "discriminative", "reranking", "approach"], "offsets": [129, 130, 131, 132]}, {"text": "improvements", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["improvements"], "offsets": [144]}, {"text": "up to 4 bleu", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["up", "to", "4", "bleu"], "offsets": [146, 147, 148, 149]}, {"text": "beam search output", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["beam", "search", "output"], "offsets": [152, 153, 154]}], "trigger": {"text": 
"yielding", "tokens": ["yielding"], "offsets": [143]}}], "document": ["reranking", "models", "enable", "the", "integration", "of", "rich", "features", "to", "select", "a", "better", "output", "hypothesis", "within", "an", "n", "-", "best", "list", "or", "lattice", ".", "these", "models", "have", "a", "long", "history", "in", "nlp", ",", "and", "we", "revisit", "discriminative", "reranking", "for", "modern", "neural", "machine", "translation", "models", "by", "training", "a", "large", "transformer", "architecture", ".", "this", "takes", "as", "input", "both", "the", "source", "sentence", "as", "well", "as", "a", "list", "of", "hypotheses", "to", "output", "a", "ranked", "list", ".", "the", "reranker", "is", "trained", "to", "predict", "the", "observed", "distribution", "of", "a", "desired", "metric", ",", "e", ".", "g", ".", "bleu", ",", "over", "the", "n", "-", "best", "list", ".", "since", "such", "a", "discriminator", "contains", "hundreds", "of", "millions", "of", "parameters", ",", "we", "improve", "its", "generalization", "using", "pre", "-", "training", "and", "data", "augmentation", "techniques", ".", "experiments", "on", "four", "wmt", "directions", "show", "that", "our", "discriminative", "reranking", "approach", "is", "effective", "and", "complementary", "to", "existing", "generative", "reranking", "approaches", ",", "yielding", "improvements", "of", "up", "to", "4", "bleu", "over", "the", "beam", "search", "output", "."]}, {"venue": "ACL", "title": "Tetra-Tagging: Word-Synchronous Parsing with Linear-Time Inference", "abstract": "We present a constituency parsing algorithm that, like a supertagger, works by assigning labels to each word in a sentence. In order to maximally leverage current neural architectures, the model scores each word\u2019s tags in parallel, with minimal task-specific structure. After scoring, a left-to-right reconciliation phase extracts a tree in (empirically) linear time. 
Our parser achieves 95.4 F1 on the WSJ test set while also achieving substantial speedups compared to current state-of-the-art parsers with comparable accuracies.", "doc_id": "d85dc94fb8aaf894a80a3d0590b9c5fa", "publication_year": 2020, "sentences": ["we present a constituency parsing algorithm that , like a supertagger , works by assigning labels to each word in a sentence .", "in order to maximally leverage current neural architectures , the model scores each word \u2019 s tags in parallel , with minimal task - specific structure .", "after scoring , a left - to - right reconciliation phase extracts a tree in ( empirically ) linear time .", "our parser achieves 95 . 4 f1 on the wsj test set while also achieving substantial speedups compared to current state - of - the - art parsers with comparable accuracies ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "constituency parsing algorithm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["constituency", "parsing", "algorithm"], "offsets": [3, 4, 5]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "MDS", "arguments": [{"text": "each word in a sentence", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "word", "in", "a", "sentence"], "offsets": [17, 18, 19, 20, 21]}, {"text": "labels", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["labels"], "offsets": [15]}], "trigger": {"text": "assigning", "tokens": ["assigning"], "offsets": [14]}}, {"event_type": "PUR", "arguments": [{"text": "current neural architectures", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["current", "neural", "architectures"], "offsets": [28, 29, 30]}], "trigger": {"text": "maximally leverage", "tokens": ["maximally", "leverage"], "offsets": [26, 27]}}, {"event_type": "FAC", "arguments": [{"text": "constituency parsing 
algorithm", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["constituency", "parsing", "algorithm"], "offsets": [3, 4, 5]}, {"text": "95 . 4", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["95", ".", "4"], "offsets": [74, 75, 76]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["f1"], "offsets": [77]}, {"text": "wsj test set", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["wsj", "test", "set"], "offsets": [80, 81, 82]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [73]}}, {"event_type": "CMP", "arguments": [{"text": "constituency parsing algorithm", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["constituency", "parsing", "algorithm"], "offsets": [3, 4, 5]}, {"text": "current state - of - the - art parsers", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "state", "-", "of", "-", "the", "-", "art", "parsers"], "offsets": [90, 91, 92, 93, 94, 95, 96, 97, 98]}, {"text": "substantial speedups", "nugget_type": "STR", "argument_type": "Result", "tokens": ["substantial", "speedups"], "offsets": [86, 87]}, {"text": "comparable accuracies", "nugget_type": "STR", "argument_type": "Result", "tokens": ["comparable", "accuracies"], "offsets": [100, 101]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [85]}}, {"event_type": "MDS", "arguments": [{"text": "maximally leverage", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["maximally", "leverage"], "offsets": [26, 27]}, {"text": "each word \u2019 s tags", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "word", "\u2019", "s", "tags"], "offsets": [35, 36, 37, 38, 39]}, {"text": "minimal task - specific structure", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["minimal", "task", "-", "specific", "structure"], "offsets": [44, 45, 46, 47, 48]}], "trigger": {"text": "scores", "tokens": ["scores"], "offsets": [34]}}, 
{"event_type": "MDS", "arguments": [{"text": "left - to - right reconciliation phase", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["left", "-", "to", "-", "right", "reconciliation", "phase"], "offsets": [54, 55, 56, 57, 58, 59, 60]}, {"text": "tree", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["tree"], "offsets": [63]}, {"text": "in ( empirically ) linear time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "linear", "time"], "offsets": [64, 68, 69]}], "trigger": {"text": "extracts", "tokens": ["extracts"], "offsets": [61]}}], "document": ["we", "present", "a", "constituency", "parsing", "algorithm", "that", ",", "like", "a", "supertagger", ",", "works", "by", "assigning", "labels", "to", "each", "word", "in", "a", "sentence", ".", "in", "order", "to", "maximally", "leverage", "current", "neural", "architectures", ",", "the", "model", "scores", "each", "word", "\u2019", "s", "tags", "in", "parallel", ",", "with", "minimal", "task", "-", "specific", "structure", ".", "after", "scoring", ",", "a", "left", "-", "to", "-", "right", "reconciliation", "phase", "extracts", "a", "tree", "in", "(", "empirically", ")", "linear", "time", ".", "our", "parser", "achieves", "95", ".", "4", "f1", "on", "the", "wsj", "test", "set", "while", "also", "achieving", "substantial", "speedups", "compared", "to", "current", "state", "-", "of", "-", "the", "-", "art", "parsers", "with", "comparable", "accuracies", "."]}, {"venue": "ACL", "title": "Generating Scientific Definitions with Controllable Complexity", "abstract": "Unfamiliar terminology and complex language can present barriers to understanding science. Natural language processing stands to help address these issues by automatically defining unfamiliar terms. We introduce a new task and dataset for defining scientific terms and controlling the complexity of generated definitions as a way of adapting to a specific reader\u2019s background knowledge. 
We test four definition generation methods for this new task, finding that a sequence-to-sequence approach is most successful. We then explore the version of the task in which definitions are generated at a target complexity level. We introduce a novel reranking approach and find in human evaluations that it offers superior fluency while also controlling complexity, compared to several controllable generation baselines.", "doc_id": "775bb58fa602dfaadae8cf75bdb07c21", "publication_year": 2022, "sentences": ["unfamiliar terminology and complex language can present barriers to understanding science .", "natural language processing stands to help address these issues by automatically defining unfamiliar terms .", "we introduce a new task and dataset for defining scientific terms and controlling the complexity of generated definitions as a way of adapting to a specific reader \u2019 s background knowledge .", "we test four definition generation methods for this new task , finding that a sequence - to - sequence approach is most successful .", "we then explore the version of the task in which definitions are generated at a target complexity level .", "we introduce a novel reranking approach and find in human evaluations that it offers superior fluency while also controlling complexity , compared to several controllable generation baselines ."], "events": [{"event_type": "RWF", "arguments": [{"text": "barriers", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["barriers"], "offsets": [7]}, {"text": "unfamiliar terminology", "nugget_type": "WEA", "argument_type": "Concern", "tokens": ["unfamiliar", "terminology"], "offsets": [0, 1]}, {"text": "complex language", "nugget_type": "WEA", "argument_type": "Concern", "tokens": ["complex", "language"], "offsets": [3, 4]}, {"text": "understanding", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["understanding"], "offsets": [9]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [6]}}, 
{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [27]}, {"text": "task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["task"], "offsets": [31]}, {"text": "dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset"], "offsets": [33]}, {"text": "defining", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["defining"], "offsets": [35]}, {"text": "controlling", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["controlling"], "offsets": [39]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [28]}}, {"event_type": "PUR", "arguments": [{"text": "scientific terms", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["scientific", "terms"], "offsets": [36, 37]}], "trigger": {"text": "defining", "tokens": ["defining"], "offsets": [35]}}, {"event_type": "PUR", "arguments": [{"text": "complexity of generated definitions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["complexity", "of", "generated", "definitions"], "offsets": [41, 42, 43, 44]}], "trigger": {"text": "controlling", "tokens": ["controlling"], "offsets": [39]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [59]}, {"text": "four definition generation methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["four", "definition", "generation", "methods"], "offsets": [61, 62, 63, 64]}], "trigger": {"text": "test", "tokens": ["test"], "offsets": [60]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [59]}, {"text": "most successful", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["most", "successful"], "offsets": [80, 81]}], "trigger": {"text": "finding", "tokens": ["finding"], "offsets": [70]}}, {"event_type": "FAC", "arguments": [{"text": 
"sequence - to - sequence approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["sequence", "-", "to", "-", "sequence", "approach"], "offsets": [73, 74, 75, 76, 77, 78]}], "trigger": {"text": "most successful", "tokens": ["most", "successful"], "offsets": [80, 81]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [83]}, {"text": "version of the task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["version", "of", "the", "task"], "offsets": [87, 88, 89, 90]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [85]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [102]}, {"text": "reranking approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["reranking", "approach"], "offsets": [106, 107]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [103]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [102]}, {"text": "offers", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["offers"], "offsets": [115]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [109]}}, {"event_type": "CMP", "arguments": [{"text": "in human evaluations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "human", "evaluations"], "offsets": [110, 111, 112]}, {"text": "reranking approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["reranking", "approach"], "offsets": [106, 107]}, {"text": "superior fluency", "nugget_type": "STR", "argument_type": "Result", "tokens": ["superior", "fluency"], "offsets": [116, 117]}, {"text": "while also controlling complexity", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "also", "controlling", "complexity"], "offsets": [118, 119, 120, 121]}, {"text": 
"controllable generation baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["controllable", "generation", "baselines"], "offsets": [126, 127, 128]}], "trigger": {"text": "offers", "tokens": ["offers"], "offsets": [115]}}, {"event_type": "PUR", "arguments": [{"text": "science", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["science"], "offsets": [10]}], "trigger": {"text": "understanding", "tokens": ["understanding"], "offsets": [9]}}], "document": ["unfamiliar", "terminology", "and", "complex", "language", "can", "present", "barriers", "to", "understanding", "science", ".", "natural", "language", "processing", "stands", "to", "help", "address", "these", "issues", "by", "automatically", "defining", "unfamiliar", "terms", ".", "we", "introduce", "a", "new", "task", "and", "dataset", "for", "defining", "scientific", "terms", "and", "controlling", "the", "complexity", "of", "generated", "definitions", "as", "a", "way", "of", "adapting", "to", "a", "specific", "reader", "\u2019", "s", "background", "knowledge", ".", "we", "test", "four", "definition", "generation", "methods", "for", "this", "new", "task", ",", "finding", "that", "a", "sequence", "-", "to", "-", "sequence", "approach", "is", "most", "successful", ".", "we", "then", "explore", "the", "version", "of", "the", "task", "in", "which", "definitions", "are", "generated", "at", "a", "target", "complexity", "level", ".", "we", "introduce", "a", "novel", "reranking", "approach", "and", "find", "in", "human", "evaluations", "that", "it", "offers", "superior", "fluency", "while", "also", "controlling", "complexity", ",", "compared", "to", "several", "controllable", "generation", "baselines", "."]}, {"venue": "ACL", "title": "Problems with Cosine as a Measure of Embedding Similarity for High Frequency Words", "abstract": "Cosine similarity of contextual embeddings is used in many NLP tasks (e.g., QA, IR, MT) and metrics (e.g., BERTScore). 
Here, we uncover systematic ways in which word similarities estimated by cosine over BERT embeddings are understated and trace this effect to training data frequency. We find that relative to human judgements, cosine similarity underestimates the similarity of frequent words with other instances of the same word or other words across contexts, even after controlling for polysemy and other factors. We conjecture that this underestimation of similarity for high frequency words is due to differences in the representational geometry of high and low frequency words and provide a formal argument for the two-dimensional case.", "doc_id": "1759d922b368c620a5af83487e245b20", "publication_year": 2022, "sentences": ["cosine similarity of contextual embeddings is used in many nlp tasks ( e . g . , qa , ir , mt ) and metrics ( e . g . , bertscore ) .", "here , we uncover systematic ways in which word similarities estimated by cosine over bert embeddings are understated and trace this effect to training data frequency .", "we find that relative to human judgements , cosine similarity underestimates the similarity of frequent words with other instances of the same word or other words across contexts , even after controlling for polysemy and other factors .", "we conjecture that this underestimation of similarity for high frequency words is due to differences in the representational geometry of high and low frequency words and provide a formal argument for the two - dimensional case ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [36]}, {"text": "training", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["training"], "offsets": [57]}, {"text": "effect", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["effect"], "offsets": [55]}], "trigger": {"text": "trace", "tokens": ["trace"], "offsets": [53]}}, {"event_type": "FIN", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [61]}, {"text": "underestimates", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["underestimates"], "offsets": [71]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [62]}}, {"event_type": "CMP", "arguments": [{"text": "human judgements", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["human", "judgements"], "offsets": [66, 67]}, {"text": "cosine similarity", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["cosine", "similarity"], "offsets": [69, 70]}, {"text": "similarity of frequent words", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["similarity", "of", "frequent", "words"], "offsets": [73, 74, 75, 76]}], "trigger": {"text": "underestimates", "tokens": ["underestimates"], "offsets": [71]}}, {"event_type": "ITT", "arguments": [{"text": "nlp tasks ( e . g . , qa , ir , mt ) and metrics", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "tasks", "metrics"], "offsets": [9, 10, 24]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [6]}}, {"event_type": "FAC", "arguments": [{"text": "underestimation of similarity for high frequency words", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["underestimation", "of", "similarity", "for", "high", "frequency", "words"], "offsets": [103, 104, 105, 106, 107, 108, 109]}, {"text": "formal argument", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["formal", "argument"], "offsets": [127, 128]}, {"text": "two - dimensional case", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["two", "-", "dimensional", "case"], "offsets": [131, 132, 133, 134]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [125]}}, {"event_type": "PUR", "arguments": [{"text": "data frequency", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["data", "frequency"], "offsets": [58, 59]}], "trigger": {"text": "training", "tokens": 
["training"], "offsets": [57]}}, {"event_type": "MDS", "arguments": [{"text": "word similarities", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["word", "similarities"], "offsets": [42, 43]}, {"text": "cosine", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["cosine"], "offsets": [46]}, {"text": "over bert embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "bert", "embeddings"], "offsets": [47, 48, 49]}], "trigger": {"text": "estimated", "tokens": ["estimated"], "offsets": [44]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [36]}, {"text": "systematic ways", "nugget_type": "APP", "argument_type": "Content", "tokens": ["systematic", "ways"], "offsets": [38, 39]}], "trigger": {"text": "uncover", "tokens": ["uncover"], "offsets": [37]}}], "document": ["cosine", "similarity", "of", "contextual", "embeddings", "is", "used", "in", "many", "nlp", "tasks", "(", "e", ".", "g", ".", ",", "qa", ",", "ir", ",", "mt", ")", "and", "metrics", "(", "e", ".", "g", ".", ",", "bertscore", ")", ".", "here", ",", "we", "uncover", "systematic", "ways", "in", "which", "word", "similarities", "estimated", "by", "cosine", "over", "bert", "embeddings", "are", "understated", "and", "trace", "this", "effect", "to", "training", "data", "frequency", ".", "we", "find", "that", "relative", "to", "human", "judgements", ",", "cosine", "similarity", "underestimates", "the", "similarity", "of", "frequent", "words", "with", "other", "instances", "of", "the", "same", "word", "or", "other", "words", "across", "contexts", ",", "even", "after", "controlling", "for", "polysemy", "and", "other", "factors", ".", "we", "conjecture", "that", "this", "underestimation", "of", "similarity", "for", "high", "frequency", "words", "is", "due", "to", "differences", "in", "the", "representational", "geometry", "of", "high", "and", "low", "frequency", "words", 
"and", "provide", "a", "formal", "argument", "for", "the", "two", "-", "dimensional", "case", "."]}, {"venue": "ACL", "title": "Can Transformer Models Measure Coherence In Text: Re-Thinking the Shuffle Test", "abstract": "The Shuffle Test is the most common task to evaluate whether NLP models can measure coherence in text. Most recent work uses direct supervision on the task; we show that by simply finetuning a RoBERTa model, we can achieve a near perfect accuracy of 97.8%, a state-of-the-art. We argue that this outstanding performance is unlikely to lead to a good model of text coherence, and suggest that the Shuffle Test should be approached in a Zero-Shot setting: models should be evaluated without being trained on the task itself. We evaluate common models in this setting, such as Generative and Bi-directional Transformers, and find that larger architectures achieve high-performance out-of-the-box. Finally, we suggest the k-Block Shuffle Test, a modification of the original by increasing the size of blocks shuffled. Even though human reader performance remains high (around 95% accuracy), model performance drops from 94% to 78% as block size increases, creating a conceptually simple challenge to benchmark NLP models.", "doc_id": "a937cbdfd88465bc7c6e136e33a73240", "publication_year": 2021, "sentences": ["the shuffle test is the most common task to evaluate whether nlp models can measure coherence in text .", "most recent work uses direct supervision on the task ; we show that by simply finetuning a roberta model , we can achieve a near perfect accuracy of 97 . 
8 % , a state - of - the - art .", "we argue that this outstanding performance is unlikely to lead to a good model of text coherence , and suggest that the shuffle test should be approached in a zero - shot setting : models should be evaluated without being trained on the task itself .", "we evaluate common models in this setting , such as generative and bi - directional transformers , and find that larger architectures achieve high - performance out - of - the - box .", "finally , we suggest the k - block shuffle test , a modification of the original by increasing the size of blocks shuffled .", "even though human reader performance remains high ( around 95 % accuracy ) , model performance drops from 94 % to 78 % as block size increases , creating a conceptually simple challenge to benchmark nlp models ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [29]}, {"text": "roberta model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["roberta", "model"], "offsets": [36, 37]}], "trigger": {"text": "finetuning", "tokens": ["finetuning"], "offsets": [34]}}, {"event_type": "FAC", "arguments": [{"text": "roberta model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["roberta", "model"], "offsets": [36, 37]}, {"text": "97 . 
8 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["97", ".", "8", "%"], "offsets": [47, 48, 49, 50]}, {"text": "near perfect accuracy", "nugget_type": "STR", "argument_type": "Object", "tokens": ["near", "perfect", "accuracy"], "offsets": [43, 44, 45]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [41]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [107]}, {"text": "common models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["common", "models"], "offsets": [109, 110]}, {"text": "in this setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "zero", "-", "shot", "setting"], "offsets": [111, 90, 91, 92, 93]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [108]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [107]}, {"text": "achieve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieve"], "offsets": [129]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [125]}}, {"event_type": "FAC", "arguments": [{"text": "larger architectures", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["larger", "architectures"], "offsets": [127, 128]}, {"text": "high - performance out - of - the - box", "nugget_type": "STR", "argument_type": "Object", "tokens": ["high", "-", "performance", "out", "-", "of", "-", "the", "-", "box"], "offsets": [130, 131, 132, 133, 134, 135, 136, 137, 138, 139]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [129]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [143]}, {"text": "k - block shuffle test", "nugget_type": "APP", "argument_type": "Content", "tokens": ["k", "-", "block", "shuffle", "test"], "offsets": [146, 147, 148, 149, 150]}], 
"trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [144]}}, {"event_type": "ITT", "arguments": [{"text": "nlp models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "models"], "offsets": [11, 12]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [14]}}, {"event_type": "FAC", "arguments": [{"text": "model performance", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["model", "performance"], "offsets": [179, 180]}, {"text": "from 94 %", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "94", "%"], "offsets": [182, 183, 184]}, {"text": "78 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["78", "%"], "offsets": [186, 187]}, {"text": "as block size increases", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "block", "size", "increases"], "offsets": [188, 189, 190, 191]}, {"text": "creating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["creating"], "offsets": [193]}], "trigger": {"text": "drops", "tokens": ["drops"], "offsets": [181]}}, {"event_type": "PUR", "arguments": [{"text": "conceptually simple challenge", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["conceptually", "simple", "challenge"], "offsets": [195, 196, 197]}, {"text": "to benchmark nlp models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "benchmark", "nlp", "models"], "offsets": [198, 199, 200, 201]}], "trigger": {"text": "creating", "tokens": ["creating"], "offsets": [193]}}], "document": ["the", "shuffle", "test", "is", "the", "most", "common", "task", "to", "evaluate", "whether", "nlp", "models", "can", "measure", "coherence", "in", "text", ".", "most", "recent", "work", "uses", "direct", "supervision", "on", "the", "task", ";", "we", "show", "that", "by", "simply", "finetuning", "a", "roberta", "model", ",", "we", "can", "achieve", "a", "near", "perfect", "accuracy", "of", "97", ".", "8", "%", ",", "a", "state", "-", "of", 
"-", "the", "-", "art", ".", "we", "argue", "that", "this", "outstanding", "performance", "is", "unlikely", "to", "lead", "to", "a", "good", "model", "of", "text", "coherence", ",", "and", "suggest", "that", "the", "shuffle", "test", "should", "be", "approached", "in", "a", "zero", "-", "shot", "setting", ":", "models", "should", "be", "evaluated", "without", "being", "trained", "on", "the", "task", "itself", ".", "we", "evaluate", "common", "models", "in", "this", "setting", ",", "such", "as", "generative", "and", "bi", "-", "directional", "transformers", ",", "and", "find", "that", "larger", "architectures", "achieve", "high", "-", "performance", "out", "-", "of", "-", "the", "-", "box", ".", "finally", ",", "we", "suggest", "the", "k", "-", "block", "shuffle", "test", ",", "a", "modification", "of", "the", "original", "by", "increasing", "the", "size", "of", "blocks", "shuffled", ".", "even", "though", "human", "reader", "performance", "remains", "high", "(", "around", "95", "%", "accuracy", ")", ",", "model", "performance", "drops", "from", "94", "%", "to", "78", "%", "as", "block", "size", "increases", ",", "creating", "a", "conceptually", "simple", "challenge", "to", "benchmark", "nlp", "models", "."]}, {"venue": "ACL", "title": "Handling Divergent Reference Texts when Evaluating Table-to-Text Generation", "abstract": "Automatically constructed datasets for generating text from semi-structured data (tables), such as WikiBio, often contain reference texts that diverge from the information in the corresponding semi-structured data. We show that metrics which rely solely on the reference texts, such as BLEU and ROUGE, show poor correlation with human judgments when those references diverge. We propose a new metric, PARENT, which aligns n-grams from the reference and generated texts to the semi-structured data before computing their precision and recall. 
Through a large scale human evaluation study of table-to-text models for WikiBio, we show that PARENT correlates with human judgments better than existing text generation metrics. We also adapt and evaluate the information extraction based evaluation proposed by Wiseman et al (2017), and show that PARENT has comparable correlation to it, while being easier to use. We show that PARENT is also applicable when the reference texts are elicited from humans using the data from the WebNLG challenge.", "doc_id": "fd18ae00049b6fda4fc2badf37a1ac0d", "publication_year": 2019, "sentences": ["automatically constructed datasets for generating text from semi - structured data ( tables ) , such as wikibio , often contain reference texts that diverge from the information in the corresponding semi - structured data .", "we show that metrics which rely solely on the reference texts , such as bleu and rouge , show poor correlation with human judgments when those references diverge .", "we propose a new metric , parent , which aligns n - grams from the reference and generated texts to the semi - structured data before computing their precision and recall .", "through a large scale human evaluation study of table - to - text models for wikibio , we show that parent correlates with human judgments better than existing text generation metrics .", "we also adapt and evaluate the information extraction based evaluation proposed by wiseman et al ( 2017 ) , and show that parent has comparable correlation to it , while being easier to use .", "we show that parent is also applicable when the reference texts are elicited from humans using the data from the webnlg challenge ."], "events": [{"event_type": "ITT", "arguments": [{"text": "automatically constructed datasets", "nugget_type": "DST", "argument_type": "Target", "tokens": ["automatically", "constructed", "datasets"], "offsets": [0, 1, 2]}], "trigger": {"text": "contain", "tokens": ["contain"], "offsets": [20]}}, {"event_type": "RWF", 
"arguments": [{"text": "metrics", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["metrics"], "offsets": [39]}, {"text": "which rely solely on the reference texts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["which", "rely", "solely", "on", "the", "reference", "texts"], "offsets": [40, 41, 42, 43, 44, 45, 46]}, {"text": "when those references diverge", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "those", "references", "diverge"], "offsets": [60, 61, 62, 63]}, {"text": "poor correlation with human judgments", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["poor", "correlation", "with", "human", "judgments"], "offsets": [55, 56, 57, 58, 59]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [54]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [65]}, {"text": "parent", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["parent"], "offsets": [71]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [66]}}, {"event_type": "MDS", "arguments": [{"text": "n - grams", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["n", "-", "grams"], "offsets": [75, 76, 77]}, {"text": "reference and generated texts", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["reference", "and", "generated", "texts"], "offsets": [80, 81, 82, 83]}, {"text": "semi - structured data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["semi", "-", "structured", "data"], "offsets": [86, 87, 88, 89]}, {"text": "before computing their precision and recall", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["before", "computing", "their", "precision", "and", "recall"], "offsets": [90, 91, 92, 93, 94, 95]}], "trigger": {"text": "aligns", "tokens": ["aligns"], "offsets": [74]}}, {"event_type": "CMP", "arguments": [{"text": "through a large scale human 
evaluation study of table - to - text models for wikibio", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "a", "large", "scale", "human", "evaluation", "study", "of", "table", "-", "to", "-", "text", "models", "for", "wikibio"], "offsets": [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112]}, {"text": "existing text generation metrics", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["existing", "text", "generation", "metrics"], "offsets": [124, 125, 126, 127]}, {"text": "parent", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["parent"], "offsets": [117]}, {"text": "human judgments", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["human", "judgments"], "offsets": [120, 121]}], "trigger": {"text": "correlates", "tokens": ["correlates"], "offsets": [118]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [129]}, {"text": "information extraction based evaluation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["information", "extraction", "based", "evaluation"], "offsets": [135, 136, 137, 138]}], "trigger": {"text": "adapt and evaluate", "tokens": ["adapt", "and", "evaluate"], "offsets": [131, 132, 133]}}, {"event_type": "FAC", "arguments": [{"text": "parent", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["parent"], "offsets": [151]}, {"text": "comparable correlation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["comparable", "correlation"], "offsets": [153, 154]}, {"text": "while being easier to use", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "being", "easier", "to", "use"], "offsets": [158, 159, 160, 161, 162]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [152]}}, {"event_type": "FAC", "arguments": [{"text": "parent", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["parent"], "offsets": [167]}, 
{"text": "when the reference texts are elicited from humans using the data from the webnlg challenge", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "the", "reference", "texts", "are", "elicited", "from", "humans", "using", "the", "data", "from", "the", "webnlg", "challenge"], "offsets": [171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185]}], "trigger": {"text": "applicable", "tokens": ["applicable"], "offsets": [170]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [114]}, {"text": "correlates", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["correlates"], "offsets": [118]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [115]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [129]}, {"text": "has", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["has"], "offsets": [152]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [149]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [164]}, {"text": "applicable", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["applicable"], "offsets": [170]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [165]}}], "document": ["automatically", "constructed", "datasets", "for", "generating", "text", "from", "semi", "-", "structured", "data", "(", "tables", ")", ",", "such", "as", "wikibio", ",", "often", "contain", "reference", "texts", "that", "diverge", "from", "the", "information", "in", "the", "corresponding", "semi", "-", "structured", "data", ".", "we", "show", "that", "metrics", "which", "rely", "solely", "on", "the", "reference", "texts", ",", "such", "as", "bleu", "and", "rouge", ",", "show", "poor", "correlation", "with", "human", "judgments", "when", 
"those", "references", "diverge", ".", "we", "propose", "a", "new", "metric", ",", "parent", ",", "which", "aligns", "n", "-", "grams", "from", "the", "reference", "and", "generated", "texts", "to", "the", "semi", "-", "structured", "data", "before", "computing", "their", "precision", "and", "recall", ".", "through", "a", "large", "scale", "human", "evaluation", "study", "of", "table", "-", "to", "-", "text", "models", "for", "wikibio", ",", "we", "show", "that", "parent", "correlates", "with", "human", "judgments", "better", "than", "existing", "text", "generation", "metrics", ".", "we", "also", "adapt", "and", "evaluate", "the", "information", "extraction", "based", "evaluation", "proposed", "by", "wiseman", "et", "al", "(", "2017", ")", ",", "and", "show", "that", "parent", "has", "comparable", "correlation", "to", "it", ",", "while", "being", "easier", "to", "use", ".", "we", "show", "that", "parent", "is", "also", "applicable", "when", "the", "reference", "texts", "are", "elicited", "from", "humans", "using", "the", "data", "from", "the", "webnlg", "challenge", "."]}, {"venue": "ACL", "title": "Returning the N to NLP: Towards Contextually Personalized Classification Models", "abstract": "Most NLP models today treat language as universal, even though socio- and psycholingustic research shows that the communicated message is influenced by the characteristics of the speaker as well as the target audience. This paper surveys the landscape of personalization in natural language processing and related fields, and offers a path forward to mitigate the decades of deviation of the NLP tools from sociolingustic findings, allowing to flexibly process the \u201cnatural\u201d language of each user rather than enforcing a uniform NLP treatment. 
It outlines a possible direction to incorporate these aspects into neural NLP models by means of socially contextual personalization, and proposes to shift the focus of our evaluation strategies accordingly.", "doc_id": "03beffe9ba81d0443f242f341feeaa2f", "publication_year": 2020, "sentences": ["most nlp models today treat language as universal , even though socio - and psycholingustic research shows that the communicated message is influenced by the characteristics of the speaker as well as the target audience .", "this paper surveys the landscape of personalization in natural language processing and related fields , and offers a path forward to mitigate the decades of deviation of the nlp tools from sociolingustic findings , allowing to flexibly process the \u201c natural \u201d language of each user rather than enforcing a uniform nlp treatment .", "it outlines a possible direction to incorporate these aspects into neural nlp models by means of socially contextual personalization , and proposes to shift the focus of our evaluation strategies accordingly ."], "events": [{"event_type": "ITT", "arguments": [{"text": "landscape of personalization in natural language processing and related fields", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["landscape", "of", "personalization", "in", "natural", "language", "processing", "and", "related", "fields"], "offsets": [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]}], "trigger": {"text": "surveys", "tokens": ["surveys"], "offsets": [38]}}, {"event_type": "MDS", "arguments": [{"text": "aspects", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["aspects"], "offsets": [98]}, {"text": "neural nlp models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["neural", "nlp", "models"], "offsets": [100, 101, 102]}, {"text": "means of socially contextual personalization", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["means", "of", "socially", "contextual", "personalization"], 
"offsets": [104, 105, 106, 107, 108]}], "trigger": {"text": "incorporate", "tokens": ["incorporate"], "offsets": [96]}}, {"event_type": "MDS", "arguments": [{"text": "focus of our evaluation strategies", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["focus", "of", "our", "evaluation", "strategies"], "offsets": [115, 116, 117, 118, 119]}], "trigger": {"text": "shift", "tokens": ["shift"], "offsets": [113]}}], "document": ["most", "nlp", "models", "today", "treat", "language", "as", "universal", ",", "even", "though", "socio", "-", "and", "psycholingustic", "research", "shows", "that", "the", "communicated", "message", "is", "influenced", "by", "the", "characteristics", "of", "the", "speaker", "as", "well", "as", "the", "target", "audience", ".", "this", "paper", "surveys", "the", "landscape", "of", "personalization", "in", "natural", "language", "processing", "and", "related", "fields", ",", "and", "offers", "a", "path", "forward", "to", "mitigate", "the", "decades", "of", "deviation", "of", "the", "nlp", "tools", "from", "sociolingustic", "findings", ",", "allowing", "to", "flexibly", "process", "the", "\u201c", "natural", "\u201d", "language", "of", "each", "user", "rather", "than", "enforcing", "a", "uniform", "nlp", "treatment", ".", "it", "outlines", "a", "possible", "direction", "to", "incorporate", "these", "aspects", "into", "neural", "nlp", "models", "by", "means", "of", "socially", "contextual", "personalization", ",", "and", "proposes", "to", "shift", "the", "focus", "of", "our", "evaluation", "strategies", "accordingly", "."]}, {"venue": "ACL", "title": "RNG-KBQA: Generation Augmented Iterative Ranking for Knowledge Base Question Answering", "abstract": "Existing KBQA approaches, despite achieving strong performance on i.i.d. test data, often struggle in generalizing to questions involving unseen KB schema items. Prior ranking-based approaches have shown some success in generalization, but suffer from the coverage issue. 
We present RnG-KBQA, a Rank-and-Generate approach for KBQA, which remedies the coverage issue with a generation model while preserving a strong generalization capability. Our approach first uses a contrastive ranker to rank a set of candidate logical forms obtained by searching over the knowledge graph. It then introduces a tailored generation model conditioned on the question and the top-ranked candidates to compose the final logical form. We achieve new state-of-the-art results on GrailQA and WebQSP datasets. In particular, our method surpasses the prior state-of-the-art by a large margin on the GrailQA leaderboard. In addition, RnG-KBQA outperforms all prior approaches on the popular WebQSP benchmark, even including the ones that use the oracle entity linking. The experimental results demonstrate the effectiveness of the interplay between ranking and generation, which leads to the superior performance of our proposed approach across all settings with especially strong improvements in zero-shot generalization.", "doc_id": "f724ffec12252107512fa572ff72e015", "publication_year": 2022, "sentences": ["existing kbqa approaches , despite achieving strong performance on i . i . d . 
test data , often struggle in generalizing to questions involving unseen kb schema items .", "prior ranking - based approaches have shown some success in generalization , but suffer from the coverage issue .", "we present rng - kbqa , a rank - and - generate approach for kbqa , which remedies the coverage issue with a generation model while preserving a strong generalization capability .", "our approach first uses a contrastive ranker to rank a set of candidate logical forms obtained by searching over the knowledge graph .", "it then introduces a tailored generation model conditioned on the question and the top - ranked candidates to compose the final logical form .", "we achieve new state - of - the - art results on grailqa and webqsp datasets .", "in particular , our method surpasses the prior state - of - the - art by a large margin on the grailqa leaderboard .", "in addition , rng - kbqa outperforms all prior approaches on the popular webqsp benchmark , even including the ones that use the oracle entity linking .", "the experimental results demonstrate the effectiveness of the interplay between ranking and generation , which leads to the superior performance of our proposed approach across all settings with especially strong improvements in zero - shot generalization ."], "events": [{"event_type": "RWF", "arguments": [{"text": "struggle", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["struggle"], "offsets": [19]}, {"text": "existing kbqa approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "kbqa", "approaches"], "offsets": [0, 1, 2]}], "trigger": {"text": "struggle", "tokens": ["struggle"], "offsets": [19]}}, {"event_type": "RWF", "arguments": [{"text": "coverage issue", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["coverage", "issue"], "offsets": [46, 47]}, {"text": "ranking - based approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["ranking", "-", "based", "approaches"], 
"offsets": [31, 32, 33, 34]}], "trigger": {"text": "suffer", "tokens": ["suffer"], "offsets": [43]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [49]}, {"text": "rng - kbqa", "nugget_type": "APP", "argument_type": "Content", "tokens": ["rng", "-", "kbqa"], "offsets": [51, 52, 53]}, {"text": "kbqa", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["kbqa"], "offsets": [63]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [50]}}, {"event_type": "MDS", "arguments": [{"text": "coverage issue", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["coverage", "issue"], "offsets": [68, 69]}, {"text": "while preserving a strong generalization capability", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "preserving", "a", "strong", "generalization", "capability"], "offsets": [74, 75, 76, 77, 78, 79]}, {"text": "generation model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["generation", "model"], "offsets": [72, 73]}], "trigger": {"text": "remedies", "tokens": ["remedies"], "offsets": [66]}}, {"event_type": "MDS", "arguments": [{"text": "contrastive ranker", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["contrastive", "ranker"], "offsets": [86, 87]}, {"text": "candidate logical forms", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["candidate", "logical", "forms"], "offsets": [93, 94, 95]}, {"text": "knowledge graph", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["knowledge", "graph"], "offsets": [101, 102]}], "trigger": {"text": "rank", "tokens": ["rank"], "offsets": [89]}}, {"event_type": "MDS", "arguments": [{"text": "question", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["question"], "offsets": [114]}, {"text": "top - ranked candidates", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["top", "-", 
"ranked", "candidates"], "offsets": [117, 118, 119, 120]}, {"text": "final logical form", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["final", "logical", "form"], "offsets": [124, 125, 126]}], "trigger": {"text": "compose", "tokens": ["compose"], "offsets": [122]}}, {"event_type": "FAC", "arguments": [{"text": "new state - of - the - art results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["new", "state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [130, 131, 132, 133, 134, 135, 136, 137, 138]}, {"text": "grailqa datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["grailqa", "datasets"], "offsets": [140, 143]}, {"text": "webqsp datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["webqsp", "datasets"], "offsets": [142, 143]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [129]}}, {"event_type": "CMP", "arguments": [{"text": "prior state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["prior", "state", "-", "of", "-", "the", "-", "art"], "offsets": [152, 153, 154, 155, 156, 157, 158, 159]}, {"text": "large margin", "nugget_type": "STR", "argument_type": "Result", "tokens": ["large", "margin"], "offsets": [162, 163]}, {"text": "on the grailqa leaderboard", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "grailqa", "leaderboard"], "offsets": [164, 165, 166, 167]}, {"text": "rng - kbqa", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["rng", "-", "kbqa"], "offsets": [51, 52, 53]}], "trigger": {"text": "surpasses", "tokens": ["surpasses"], "offsets": [150]}}, {"event_type": "CMP", "arguments": [{"text": "rng - kbqa", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["rng", "-", "kbqa"], "offsets": [172, 173, 174]}, {"text": "all prior approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["all", "prior", "approaches"], "offsets": [176, 177, 178]}, {"text": "on the popular 
webqsp benchmark", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "popular", "webqsp", "benchmark"], "offsets": [179, 180, 181, 182, 183]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [175]}}, {"event_type": "FAC", "arguments": [{"text": "effectiveness of the interplay between ranking and generation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["effectiveness", "of", "the", "interplay", "between", "ranking", "and", "generation"], "offsets": [201, 202, 203, 204, 205, 206, 207, 208]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [199]}}, {"event_type": "FAC", "arguments": [{"text": "across all settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "all", "settings"], "offsets": [220, 221, 222]}, {"text": "zero - shot generalization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["zero", "-", "shot", "generalization"], "offsets": [228, 229, 230, 231]}, {"text": "strong improvements", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["strong", "improvements"], "offsets": [225, 226]}, {"text": "superior performance of our proposed approach", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["superior", "performance", "of", "our", "proposed", "approach"], "offsets": [214, 215, 216, 217, 218, 219]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [211]}}, {"event_type": "PRP", "arguments": [{"text": "tailored generation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["tailored", "generation", "model"], "offsets": [108, 109, 110]}], "trigger": {"text": "introduces", "tokens": ["introduces"], "offsets": [106]}}], "document": ["existing", "kbqa", "approaches", ",", "despite", "achieving", "strong", "performance", "on", "i", ".", "i", ".", "d", ".", "test", "data", ",", "often", "struggle", "in", "generalizing", "to", "questions", "involving", "unseen", "kb", "schema", "items", 
".", "prior", "ranking", "-", "based", "approaches", "have", "shown", "some", "success", "in", "generalization", ",", "but", "suffer", "from", "the", "coverage", "issue", ".", "we", "present", "rng", "-", "kbqa", ",", "a", "rank", "-", "and", "-", "generate", "approach", "for", "kbqa", ",", "which", "remedies", "the", "coverage", "issue", "with", "a", "generation", "model", "while", "preserving", "a", "strong", "generalization", "capability", ".", "our", "approach", "first", "uses", "a", "contrastive", "ranker", "to", "rank", "a", "set", "of", "candidate", "logical", "forms", "obtained", "by", "searching", "over", "the", "knowledge", "graph", ".", "it", "then", "introduces", "a", "tailored", "generation", "model", "conditioned", "on", "the", "question", "and", "the", "top", "-", "ranked", "candidates", "to", "compose", "the", "final", "logical", "form", ".", "we", "achieve", "new", "state", "-", "of", "-", "the", "-", "art", "results", "on", "grailqa", "and", "webqsp", "datasets", ".", "in", "particular", ",", "our", "method", "surpasses", "the", "prior", "state", "-", "of", "-", "the", "-", "art", "by", "a", "large", "margin", "on", "the", "grailqa", "leaderboard", ".", "in", "addition", ",", "rng", "-", "kbqa", "outperforms", "all", "prior", "approaches", "on", "the", "popular", "webqsp", "benchmark", ",", "even", "including", "the", "ones", "that", "use", "the", "oracle", "entity", "linking", ".", "the", "experimental", "results", "demonstrate", "the", "effectiveness", "of", "the", "interplay", "between", "ranking", "and", "generation", ",", "which", "leads", "to", "the", "superior", "performance", "of", "our", "proposed", "approach", "across", "all", "settings", "with", "especially", "strong", "improvements", "in", "zero", "-", "shot", "generalization", "."]}, {"venue": "ACL", "title": "Unsupervised Pronoun Resolution via Masked Noun-Phrase Prediction", "abstract": "In this work, we propose Masked Noun-Phrase Prediction (MNPP), a pre-training strategy to tackle 
pronoun resolution in a fully unsupervised setting. Firstly, We evaluate our pre-trained model on various pronoun resolution datasets without any finetuning. Our method outperforms all previous unsupervised methods on all datasets by large margins. Secondly, we proceed to a few-shot setting where we finetune our pre-trained model on WinoGrande-S and XS separately. Our method outperforms RoBERTa-large baseline with large margins, meanwhile, achieving a higher AUC score after further finetuning on the remaining three official splits of WinoGrande.", "doc_id": "c8ad8ef5e4a80ae292cd252de5eb8c2b", "publication_year": 2021, "sentences": ["in this work , we propose masked noun - phrase prediction ( mnpp ) , a pre - training strategy to tackle pronoun resolution in a fully unsupervised setting .", "firstly , we evaluate our pre - trained model on various pronoun resolution datasets without any finetuning .", "our method outperforms all previous unsupervised methods on all datasets by large margins .", "secondly , we proceed to a few - shot setting where we finetune our pre - trained model on winogrande - s and xs separately .", "our method outperforms roberta - large baseline with large margins , meanwhile , achieving a higher auc score after further finetuning on the remaining three official splits of winogrande ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [4]}, {"text": "masked noun - phrase prediction", "nugget_type": "APP", "argument_type": "Content", "tokens": ["masked", "noun", "-", "phrase", "prediction"], "offsets": [6, 7, 8, 9, 10]}, {"text": "tackle", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["tackle"], "offsets": [21]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [5]}}, {"event_type": "PUR", "arguments": [{"text": "pronoun resolution", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["pronoun", "resolution"], 
"offsets": [22, 23]}, {"text": "in a fully unsupervised setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "fully", "unsupervised", "setting"], "offsets": [24, 25, 26, 27, 28]}], "trigger": {"text": "tackle", "tokens": ["tackle"], "offsets": [21]}}, {"event_type": "MDS", "arguments": [{"text": "without any finetuning", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "any", "finetuning"], "offsets": [44, 45, 46]}, {"text": "pre - trained model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pre", "-", "trained", "model"], "offsets": [35, 36, 37, 38]}, {"text": "pronoun resolution datasets", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["pronoun", "resolution", "datasets"], "offsets": [41, 42, 43]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [33]}}, {"event_type": "CMP", "arguments": [{"text": "masked noun - phrase prediction", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["masked", "noun", "-", "phrase", "prediction"], "offsets": [6, 7, 8, 9, 10]}, {"text": "all previous unsupervised methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["all", "previous", "unsupervised", "methods"], "offsets": [51, 52, 53, 54]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [50]}, {"text": "large margins", "nugget_type": "STR", "argument_type": "Result", "tokens": ["large", "margins"], "offsets": [59, 60]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [50]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [64]}, {"text": "few - shot setting", "nugget_type": "APP", "argument_type": "Content", "tokens": ["few", "-", "shot", "setting"], "offsets": [68, 69, 70, 71]}], "trigger": {"text": "proceed", "tokens": ["proceed"], "offsets": [65]}}, {"event_type": 
"WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [73]}, {"text": "pre - trained model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pre", "-", "trained", "model"], "offsets": [76, 77, 78, 79]}, {"text": "on winogrande - s and xs separately", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "winogrande", "-", "s", "and", "xs", "separately"], "offsets": [80, 81, 82, 83, 84, 85, 86]}], "trigger": {"text": "finetune", "tokens": ["finetune"], "offsets": [74]}}, {"event_type": "CMP", "arguments": [{"text": "masked noun - phrase prediction", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["masked", "noun", "-", "phrase", "prediction"], "offsets": [6, 7, 8, 9, 10]}, {"text": "roberta - large baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["roberta", "-", "large", "baseline"], "offsets": [91, 92, 93, 94]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [90]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [90]}}, {"event_type": "FAC", "arguments": [{"text": "masked noun - phrase prediction", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["masked", "noun", "-", "phrase", "prediction"], "offsets": [6, 7, 8, 9, 10]}, {"text": "after further finetuning on the remaining three official splits of winogrande", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["after", "further", "finetuning", "on", "the", "remaining", "three", "official", "splits", "of", "winogrande"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116]}, {"text": "higher auc score", "nugget_type": "STR", "argument_type": "Object", "tokens": ["higher", "auc", "score"], "offsets": [103, 104, 105]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [101]}}], "document": ["in", "this", "work", ",", "we", "propose", "masked", "noun", 
"-", "phrase", "prediction", "(", "mnpp", ")", ",", "a", "pre", "-", "training", "strategy", "to", "tackle", "pronoun", "resolution", "in", "a", "fully", "unsupervised", "setting", ".", "firstly", ",", "we", "evaluate", "our", "pre", "-", "trained", "model", "on", "various", "pronoun", "resolution", "datasets", "without", "any", "finetuning", ".", "our", "method", "outperforms", "all", "previous", "unsupervised", "methods", "on", "all", "datasets", "by", "large", "margins", ".", "secondly", ",", "we", "proceed", "to", "a", "few", "-", "shot", "setting", "where", "we", "finetune", "our", "pre", "-", "trained", "model", "on", "winogrande", "-", "s", "and", "xs", "separately", ".", "our", "method", "outperforms", "roberta", "-", "large", "baseline", "with", "large", "margins", ",", "meanwhile", ",", "achieving", "a", "higher", "auc", "score", "after", "further", "finetuning", "on", "the", "remaining", "three", "official", "splits", "of", "winogrande", "."]}, {"venue": "ACL", "title": "Improving Neural Conversational Models with Entropy-Based Data Filtering", "abstract": "Current neural network-based conversational models lack diversity and generate boring responses to open-ended utterances. Priors such as persona, emotion, or topic provide additional information to dialog models to aid response generation, but annotating a dataset with priors is expensive and such annotations are rarely available. While previous methods for improving the quality of open-domain response generation focused on either the underlying model or the training objective, we present a method of filtering dialog datasets by removing generic utterances from training data using a simple entropy-based approach that does not require human supervision. 
We conduct extensive experiments with different variations of our method, and compare dialog models across 17 evaluation metrics to show that training on datasets filtered this way results in better conversational quality as chatbots learn to output more diverse responses.", "doc_id": "b2a3bb9037abda811eceaf79c4f63d65", "publication_year": 2019, "sentences": ["current neural network - based conversational models lack diversity and generate boring responses to open - ended utterances .", "priors such as persona , emotion , or topic provide additional information to dialog models to aid response generation , but annotating a dataset with priors is expensive and such annotations are rarely available .", "while previous methods for improving the quality of open - domain response generation focused on either the underlying model or the training objective , we present a method of filtering dialog datasets by removing generic utterances from training data using a simple entropy - based approach that does not require human supervision .", "we conduct extensive experiments with different variations of our method , and compare dialog models across 17 evaluation metrics to show that training on datasets filtered this way results in better conversational quality as chatbots learn to output more diverse responses ."], "events": [{"event_type": "RWF", "arguments": [{"text": "current neural network - based conversational models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["current", "neural", "network", "-", "based", "conversational", "models"], "offsets": [0, 1, 2, 3, 4, 5, 6]}, {"text": "lack", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack"], "offsets": [7]}], "trigger": {"text": "lack", "tokens": ["lack"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "current neural network - based conversational models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["current", "neural", "network", "-", "based", 
"conversational", "models"], "offsets": [0, 1, 2, 3, 4, 5, 6]}, {"text": "boring responses", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["boring", "responses"], "offsets": [11, 12]}, {"text": "to open - ended utterances", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "open", "-", "ended", "utterances"], "offsets": [13, 14, 15, 16, 17]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "expensive", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["expensive"], "offsets": [46]}], "trigger": {"text": "expensive", "tokens": ["expensive"], "offsets": [46]}}, {"event_type": "RWF", "arguments": [{"text": "rarely available", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["rarely", "available"], "offsets": [51, 52]}], "trigger": {"text": "rarely available", "tokens": ["rarely", "available"], "offsets": [51, 52]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [78]}, {"text": "method of filtering dialog datasets", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method", "of", "filtering", "dialog", "datasets"], "offsets": [81, 82, 83, 84, 85]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [79]}}, {"event_type": "MDS", "arguments": [{"text": "simple entropy - based approach", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["simple", "entropy", "-", "based", "approach"], "offsets": [95, 96, 97, 98, 99]}, {"text": "removing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["removing"], "offsets": [87]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [93]}}, {"event_type": "PUR", "arguments": [{"text": "generic utterances", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["generic", "utterances"], "offsets": [88, 89]}, {"text": "from training data", "nugget_type": "LIM", 
"argument_type": "Condition", "tokens": ["from", "training", "data"], "offsets": [90, 91, 92]}], "trigger": {"text": "removing", "tokens": ["removing"], "offsets": [87]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [107]}, {"text": "extensive experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["extensive", "experiments"], "offsets": [109, 110]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [108]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [107]}, {"text": "dialog models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["dialog", "models"], "offsets": [120, 121]}, {"text": "across 17 evaluation metrics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "17", "evaluation", "metrics"], "offsets": [122, 123, 124, 125]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [119]}}, {"event_type": "FIN", "arguments": [{"text": "results", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["results"], "offsets": [135]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [127]}}, {"event_type": "FAC", "arguments": [{"text": "training on datasets filtered this way", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["training", "on", "datasets", "filtered", "this", "way"], "offsets": [129, 130, 131, 132, 133, 134]}, {"text": "better conversational quality", "nugget_type": "STR", "argument_type": "Object", "tokens": ["better", "conversational", "quality"], "offsets": [137, 138, 139]}, {"text": "as chatbots learn to output more diverse responses", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "chatbots", "learn", "to", "output", "more", "diverse", "responses"], "offsets": [140, 141, 142, 143, 144, 145, 146, 147]}], "trigger": {"text": "results", "tokens": 
["results"], "offsets": [135]}}], "document": ["current", "neural", "network", "-", "based", "conversational", "models", "lack", "diversity", "and", "generate", "boring", "responses", "to", "open", "-", "ended", "utterances", ".", "priors", "such", "as", "persona", ",", "emotion", ",", "or", "topic", "provide", "additional", "information", "to", "dialog", "models", "to", "aid", "response", "generation", ",", "but", "annotating", "a", "dataset", "with", "priors", "is", "expensive", "and", "such", "annotations", "are", "rarely", "available", ".", "while", "previous", "methods", "for", "improving", "the", "quality", "of", "open", "-", "domain", "response", "generation", "focused", "on", "either", "the", "underlying", "model", "or", "the", "training", "objective", ",", "we", "present", "a", "method", "of", "filtering", "dialog", "datasets", "by", "removing", "generic", "utterances", "from", "training", "data", "using", "a", "simple", "entropy", "-", "based", "approach", "that", "does", "not", "require", "human", "supervision", ".", "we", "conduct", "extensive", "experiments", "with", "different", "variations", "of", "our", "method", ",", "and", "compare", "dialog", "models", "across", "17", "evaluation", "metrics", "to", "show", "that", "training", "on", "datasets", "filtered", "this", "way", "results", "in", "better", "conversational", "quality", "as", "chatbots", "learn", "to", "output", "more", "diverse", "responses", "."]}, {"venue": "ACL", "title": "Generating Data to Mitigate Spurious Correlations in Natural Language Inference Datasets", "abstract": "Natural language processing models often exploit spurious correlations between task-independent features and labels in datasets to perform well only within the distributions they are trained on, while not generalising to different task distributions. 
We propose to tackle this problem by generating a debiased version of a dataset, which can then be used to train a debiased, off-the-shelf model, by simply replacing its training data. Our approach consists of 1) a method for training data generators to generate high-quality, label-consistent data samples; and 2) a filtering mechanism for removing data points that contribute to spurious correlations, measured in terms of z-statistics. We generate debiased versions of the SNLI and MNLI datasets, and we evaluate on a large suite of debiased, out-of-distribution, and adversarial test sets. Results show that models trained on our debiased datasets generalise better than those trained on the original datasets in all settings. On the majority of the datasets, our method outperforms or performs comparably to previous state-of-the-art debiasing strategies, and when combined with an orthogonal technique, product-of-experts, it improves further and outperforms previous best results of SNLI-hard and MNLI-hard.", "doc_id": "b4eabce40617ff71ea3f50e5759450ae", "publication_year": 2022, "sentences": ["natural language processing models often exploit spurious correlations between task - independent features and labels in datasets to perform well only within the distributions they are trained on , while not generalising to different task distributions .", "we propose to tackle this problem by generating a debiased version of a dataset , which can then be used to train a debiased , off - the - shelf model , by simply replacing its training data .", "our approach consists of 1 ) a method for training data generators to generate high - quality , label - consistent data samples ; and 2 ) a filtering mechanism for removing data points that contribute to spurious correlations , measured in terms of z - statistics .", "we generate debiased versions of the snli and mnli datasets , and we evaluate on a large suite of debiased , out - of - distribution , and adversarial test sets .", 
"results show that models trained on our debiased datasets generalise better than those trained on the original datasets in all settings .", "on the majority of the datasets , our method outperforms or performs comparably to previous state - of - the - art debiasing strategies , and when combined with an orthogonal technique , product - of - experts , it improves further and outperforms previous best results of snli - hard and mnli - hard ."], "events": [{"event_type": "RWF", "arguments": [{"text": "natural language processing models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["natural", "language", "processing", "models"], "offsets": [0, 1, 2, 3]}, {"text": "not generalising", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "generalising"], "offsets": [30, 31]}], "trigger": {"text": "not generalising", "tokens": ["not", "generalising"], "offsets": [30, 31]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [37]}, {"text": "debiased version of a dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["debiased", "version", "of", "a", "dataset"], "offsets": [46, 47, 48, 49, 50]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [44]}}, {"event_type": "MDS", "arguments": [{"text": "training data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["training", "data"], "offsets": [73, 74]}, {"text": "train", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["train"], "offsets": [58]}], "trigger": {"text": "replacing", "tokens": ["replacing"], "offsets": [71]}}, {"event_type": "PUR", "arguments": [{"text": "debiased , off - the - shelf model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["debiased", ",", "off", "-", "the", "-", "shelf", "model"], "offsets": [60, 61, 62, 63, 64, 65, 66, 67]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [58]}}, {"event_type": 
"PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [124]}, {"text": "debiased versions of snli datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["debiased", "versions", "of", "snli", "datasets"], "offsets": [126, 127, 128, 130, 133]}, {"text": "debiased versions of mnli datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["debiased", "versions", "of", "mnli", "datasets"], "offsets": [126, 127, 128, 132, 133]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [125]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [136]}, {"text": "test sets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["test", "sets"], "offsets": [153, 154]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [137]}}, {"event_type": "FIN", "arguments": [{"text": "generalise", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["generalise"], "offsets": [165]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [157]}}, {"event_type": "CMP", "arguments": [{"text": "models trained on our debiased datasets", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["models", "trained", "on", "our", "debiased", "datasets"], "offsets": [159, 160, 161, 162, 163, 164]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [166]}, {"text": "those trained on the original datasets", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["those", "trained", "on", "the", "original", "datasets"], "offsets": [168, 169, 170, 171, 172, 173]}, {"text": "in all settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "all", "settings"], "offsets": [174, 175, 176]}], "trigger": {"text": "generalise", "tokens": ["generalise"], "offsets": [165]}}, {"event_type": "CMP", "arguments": 
[{"text": "on the majority of the datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "majority", "of", "the", "datasets"], "offsets": [178, 179, 180, 181, 182, 183]}, {"text": "method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["method"], "offsets": [186]}, {"text": "outperforms or performs comparably", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms", "or", "performs", "comparably"], "offsets": [187, 188, 189, 190]}], "trigger": {"text": "outperforms or performs comparably", "tokens": ["outperforms", "or", "performs", "comparably"], "offsets": [187, 188, 189, 190]}}, {"event_type": "CMP", "arguments": [{"text": "method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["method"], "offsets": [186]}, {"text": "improves further and outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves", "further", "and", "outperforms"], "offsets": [218, 219, 220, 221]}, {"text": "when combined with an orthogonal technique , product - of - experts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "combined", "with", "an", "orthogonal", "technique", ",", "product", "-", "of", "-", "experts"], "offsets": [204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215]}, {"text": "snli - hard and mnli - hard", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["snli", "-", "hard", "and", "mnli", "-", "hard"], "offsets": [226, 227, 228, 229, 230, 231, 232]}], "trigger": {"text": "improves further and outperforms", "tokens": ["improves", "further", "and", "outperforms"], "offsets": [218, 219, 220, 221]}}, {"event_type": "MDS", "arguments": [{"text": "data generators", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["data", "generators"], "offsets": [86, 87]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [89]}], "trigger": {"text": "training", "tokens": ["training"], 
"offsets": [85]}}, {"event_type": "PUR", "arguments": [{"text": "high - quality , label - consistent data samples", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["high", "-", "quality", ",", "label", "-", "consistent", "data", "samples"], "offsets": [90, 91, 92, 93, 94, 95, 96, 97, 98]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [89]}}, {"event_type": "MDS", "arguments": [{"text": "filtering mechanism", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["filtering", "mechanism"], "offsets": [104, 105]}, {"text": "data points", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["data", "points"], "offsets": [108, 109]}], "trigger": {"text": "removing", "tokens": ["removing"], "offsets": [107]}}], "document": ["natural", "language", "processing", "models", "often", "exploit", "spurious", "correlations", "between", "task", "-", "independent", "features", "and", "labels", "in", "datasets", "to", "perform", "well", "only", "within", "the", "distributions", "they", "are", "trained", "on", ",", "while", "not", "generalising", "to", "different", "task", "distributions", ".", "we", "propose", "to", "tackle", "this", "problem", "by", "generating", "a", "debiased", "version", "of", "a", "dataset", ",", "which", "can", "then", "be", "used", "to", "train", "a", "debiased", ",", "off", "-", "the", "-", "shelf", "model", ",", "by", "simply", "replacing", "its", "training", "data", ".", "our", "approach", "consists", "of", "1", ")", "a", "method", "for", "training", "data", "generators", "to", "generate", "high", "-", "quality", ",", "label", "-", "consistent", "data", "samples", ";", "and", "2", ")", "a", "filtering", "mechanism", "for", "removing", "data", "points", "that", "contribute", "to", "spurious", "correlations", ",", "measured", "in", "terms", "of", "z", "-", "statistics", ".", "we", "generate", "debiased", "versions", "of", "the", "snli", "and", "mnli", "datasets", ",", "and", "we", "evaluate", "on", 
"a", "large", "suite", "of", "debiased", ",", "out", "-", "of", "-", "distribution", ",", "and", "adversarial", "test", "sets", ".", "results", "show", "that", "models", "trained", "on", "our", "debiased", "datasets", "generalise", "better", "than", "those", "trained", "on", "the", "original", "datasets", "in", "all", "settings", ".", "on", "the", "majority", "of", "the", "datasets", ",", "our", "method", "outperforms", "or", "performs", "comparably", "to", "previous", "state", "-", "of", "-", "the", "-", "art", "debiasing", "strategies", ",", "and", "when", "combined", "with", "an", "orthogonal", "technique", ",", "product", "-", "of", "-", "experts", ",", "it", "improves", "further", "and", "outperforms", "previous", "best", "results", "of", "snli", "-", "hard", "and", "mnli", "-", "hard", "."]}, {"venue": "ACL", "title": "Better than Average: Paired Evaluation of NLP systems", "abstract": "Evaluation in NLP is usually done by comparing the scores of competing systems independently averaged over a common set of test instances. In this work, we question the use of averages for aggregating evaluation scores into a final number used to decide which system is best, since the average, as well as alternatives such as the median, ignores the pairing arising from the fact that systems are evaluated on the same test instances. We illustrate the importance of taking the instancelevel pairing of evaluation scores into account and demonstrate, both theoretically and empirically, the advantages of aggregation methods based on pairwise comparisons, such as the Bradley\u2013Terry (BT) model, a mechanism based on the estimated probability that a given system scores better than another on the test set. By re-evaluating 296 real NLP evaluation setups across four tasks and 18 evaluation metrics, we show that the choice of aggregation mechanism matters and yields different conclusions as to which systems are state of the art in about 30% of the setups. 
To facilitate the adoption of pairwise evaluation, we release a practical tool for performing the full analysis of evaluation scores with the mean, median, BT, and two variants of BT (Elo and TrueSkill), alongside functionality for appropriate statistical testing.", "doc_id": "c62e755ef14f82b9884909a2d220aeb1", "publication_year": 2021, "sentences": ["evaluation in nlp is usually done by comparing the scores of competing systems independently averaged over a common set of test instances .", "in this work , we question the use of averages for aggregating evaluation scores into a final number used to decide which system is best , since the average , as well as alternatives such as the median , ignores the pairing arising from the fact that systems are evaluated on the same test instances .", "we illustrate the importance of taking the instancelevel pairing of evaluation scores into account and demonstrate , both theoretically and empirically , the advantages of aggregation methods based on pairwise comparisons , such as the bradley \u2013 terry ( bt ) model , a mechanism based on the estimated probability that a given system scores better than another on the test set .", "by re - evaluating 296 real nlp evaluation setups across four tasks and 18 evaluation metrics , we show that the choice of aggregation mechanism matters and yields different conclusions as to which systems are state of the art in about 30 % of the setups .", "to facilitate the adoption of pairwise evaluation , we release a practical tool for performing the full analysis of evaluation scores with the mean , median , bt , and two variants of bt ( elo and trueskill ) , alongside functionality for appropriate statistical testing ."], "events": [{"event_type": "ITT", "arguments": [{"text": "scores of competing systems", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["scores", "of", "competing", "systems"], "offsets": [9, 10, 11, 12]}], "trigger": {"text": "comparing", "tokens": 
["comparing"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "the average", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["the", "average"], "offsets": [50, 51]}, {"text": "pairing arising", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["pairing", "arising"], "offsets": [64, 65]}], "trigger": {"text": "ignores", "tokens": ["ignores"], "offsets": [62]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [79]}, {"text": "instancelevel pairing of evaluation scores into account", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["instancelevel", "pairing", "of", "evaluation", "scores", "into", "account"], "offsets": [86, 87, 88, 89, 90, 91, 92]}], "trigger": {"text": "taking", "tokens": ["taking"], "offsets": [84]}}, {"event_type": "FIN", "arguments": [{"text": "better than", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["better", "than"], "offsets": [134, 135]}, {"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [79]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [94]}}, {"event_type": "CMP", "arguments": [{"text": "another", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["another"], "offsets": [136]}, {"text": "aggregation methods", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["aggregation", "methods"], "offsets": [104, 105]}, {"text": "on the estimated probability that a given system", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "estimated", "probability", "that", "a", "given", "system"], "offsets": [125, 126, 127, 128, 129, 130, 131, 132]}, {"text": "scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["scores"], "offsets": [133]}], "trigger": {"text": "better than", "tokens": ["better", "than"], "offsets": [134, 135]}}, {"event_type": "FIN", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [159]}, {"text": "matters and yields", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["matters", "and", "yields"], "offsets": [167, 168, 169]}, {"text": "state of the art", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["state", "of", "the", "art"], "offsets": [177, 178, 179, 180]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [160]}}, {"event_type": "FAC", "arguments": [{"text": "choice of aggregation mechanism", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["choice", "of", "aggregation", "mechanism"], "offsets": [163, 164, 165, 166]}, {"text": "different conclusions", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["different", "conclusions"], "offsets": [170, 171]}], "trigger": {"text": "matters and yields", "tokens": ["matters", "and", "yields"], "offsets": [167, 168, 169]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [197]}, {"text": "practical tool", "nugget_type": "APP", "argument_type": "Content", "tokens": ["practical", "tool"], "offsets": [200, 201]}, {"text": "facilitate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["facilitate"], "offsets": [190]}], "trigger": {"text": "release", "tokens": ["release"], "offsets": [198]}}, {"event_type": "PUR", "arguments": [{"text": "adoption of pairwise evaluation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["adoption", "of", "pairwise", "evaluation"], "offsets": [192, 193, 194, 195]}], "trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [190]}}, {"event_type": "MDS", "arguments": [{"text": "mean", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["mean"], "offsets": [212]}, {"text": "median", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["median"], "offsets": [214]}, {"text": "bt", 
"nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["bt"], "offsets": [216]}, {"text": "two variants of bt", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["two", "variants", "of", "bt"], "offsets": [219, 220, 221, 222]}, {"text": "evaluation scores", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["evaluation", "scores"], "offsets": [208, 209]}, {"text": "full analysis", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["full", "analysis"], "offsets": [205, 206]}], "trigger": {"text": "performing", "tokens": ["performing"], "offsets": [203]}}, {"event_type": "FAC", "arguments": [{"text": "systems", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["systems"], "offsets": [175]}, {"text": "in about 30 % of the setups", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "about", "30", "%", "of", "the", "setups"], "offsets": [181, 182, 183, 184, 185, 186, 187]}], "trigger": {"text": "state of the art", "tokens": ["state", "of", "the", "art"], "offsets": [177, 178, 179, 180]}}], "document": ["evaluation", "in", "nlp", "is", "usually", "done", "by", "comparing", "the", "scores", "of", "competing", "systems", "independently", "averaged", "over", "a", "common", "set", "of", "test", "instances", ".", "in", "this", "work", ",", "we", "question", "the", "use", "of", "averages", "for", "aggregating", "evaluation", "scores", "into", "a", "final", "number", "used", "to", "decide", "which", "system", "is", "best", ",", "since", "the", "average", ",", "as", "well", "as", "alternatives", "such", "as", "the", "median", ",", "ignores", "the", "pairing", "arising", "from", "the", "fact", "that", "systems", "are", "evaluated", "on", "the", "same", "test", "instances", ".", "we", "illustrate", "the", "importance", "of", "taking", "the", "instancelevel", "pairing", "of", "evaluation", "scores", "into", "account", "and", "demonstrate", ",", "both", "theoretically", "and", "empirically", ",", 
"the", "advantages", "of", "aggregation", "methods", "based", "on", "pairwise", "comparisons", ",", "such", "as", "the", "bradley", "\u2013", "terry", "(", "bt", ")", "model", ",", "a", "mechanism", "based", "on", "the", "estimated", "probability", "that", "a", "given", "system", "scores", "better", "than", "another", "on", "the", "test", "set", ".", "by", "re", "-", "evaluating", "296", "real", "nlp", "evaluation", "setups", "across", "four", "tasks", "and", "18", "evaluation", "metrics", ",", "we", "show", "that", "the", "choice", "of", "aggregation", "mechanism", "matters", "and", "yields", "different", "conclusions", "as", "to", "which", "systems", "are", "state", "of", "the", "art", "in", "about", "30", "%", "of", "the", "setups", ".", "to", "facilitate", "the", "adoption", "of", "pairwise", "evaluation", ",", "we", "release", "a", "practical", "tool", "for", "performing", "the", "full", "analysis", "of", "evaluation", "scores", "with", "the", "mean", ",", "median", ",", "bt", ",", "and", "two", "variants", "of", "bt", "(", "elo", "and", "trueskill", ")", ",", "alongside", "functionality", "for", "appropriate", "statistical", "testing", "."]}, {"venue": "ACL", "title": "Modelling Context and Syntactical Features for Aspect-based Sentiment Analysis", "abstract": "The aspect-based sentiment analysis (ABSA) consists of two conceptual tasks, namely an aspect extraction and an aspect sentiment classification. Rather than considering the tasks separately, we build an end-to-end ABSA solution. Previous works in ABSA tasks did not fully leverage the importance of syntactical information. Hence, the aspect extraction model often failed to detect the boundaries of multi-word aspect terms. On the other hand, the aspect sentiment classifier was unable to account for the syntactical correlation between aspect terms and the context words. This paper explores the grammatical aspect of the sentence and employs the self-attention mechanism for syntactical learning. 
We combine part-of-speech embeddings, dependency-based embeddings and contextualized embeddings (e.g. BERT, RoBERTa) to enhance the performance of the aspect extractor. We also propose the syntactic relative distance to de-emphasize the adverse effects of unrelated words, having weak syntactic connection with the aspect terms. This increases the accuracy of the aspect sentiment classifier. Our solutions outperform the state-of-the-art models on SemEval-2014 dataset in both two subtasks.", "doc_id": "f229a2b9eaa647cacd7202ed997872c9", "publication_year": 2020, "sentences": ["the aspect - based sentiment analysis ( absa ) consists of two conceptual tasks , namely an aspect extraction and an aspect sentiment classification .", "rather than considering the tasks separately , we build an end - to - end absa solution .", "previous works in absa tasks did not fully leverage the importance of syntactical information .", "hence , the aspect extraction model often failed to detect the boundaries of multi - word aspect terms .", "on the other hand , the aspect sentiment classifier was unable to account for the syntactical correlation between aspect terms and the context words .", "this paper explores the grammatical aspect of the sentence and employs the self - attention mechanism for syntactical learning .", "we combine part - of - speech embeddings , dependency - based embeddings and contextualized embeddings ( e . g . 
bert , roberta ) to enhance the performance of the aspect extractor .", "we also propose the syntactic relative distance to de - emphasize the adverse effects of unrelated words , having weak syntactic connection with the aspect terms .", "this increases the accuracy of the aspect sentiment classifier .", "our solutions outperform the state - of - the - art models on semeval - 2014 dataset in both two subtasks ."], "events": [{"event_type": "ITT", "arguments": [{"text": "aspect - based sentiment analysis", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["aspect", "-", "based", "sentiment", "analysis"], "offsets": [1, 2, 3, 4, 5]}], "trigger": {"text": "consists", "tokens": ["consists"], "offsets": [9]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [32]}, {"text": "end - to - end absa solution", "nugget_type": "APP", "argument_type": "Content", "tokens": ["end", "-", "to", "-", "end", "absa", "solution"], "offsets": [35, 36, 37, 38, 39, 40, 41]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [33]}}, {"event_type": "RWF", "arguments": [{"text": "previous works", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "works"], "offsets": [43, 44]}, {"text": "importance of syntactical information", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["importance", "of", "syntactical", "information"], "offsets": [53, 54, 55, 56]}], "trigger": {"text": "not fully leverage", "tokens": ["not", "fully", "leverage"], "offsets": [49, 50, 51]}}, {"event_type": "RWF", "arguments": [{"text": "aspect extraction model", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["aspect", "extraction", "model"], "offsets": [61, 62, 63]}, {"text": "failed to detect", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["failed", "to", "detect"], "offsets": [65, 66, 67]}], "trigger": {"text": "failed to detect", "tokens": ["failed", "to", 
"detect"], "offsets": [65, 66, 67]}}, {"event_type": "RWF", "arguments": [{"text": "aspect sentiment classifier", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["aspect", "sentiment", "classifier"], "offsets": [83, 84, 85]}, {"text": "unable to account", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unable", "to", "account"], "offsets": [87, 88, 89]}], "trigger": {"text": "unable to account", "tokens": ["unable", "to", "account"], "offsets": [87, 88, 89]}}, {"event_type": "WKS", "arguments": [{"text": "grammatical aspect of the sentence", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["grammatical", "aspect", "of", "the", "sentence"], "offsets": [106, 107, 108, 109, 110]}], "trigger": {"text": "explores", "tokens": ["explores"], "offsets": [104]}}, {"event_type": "MDS", "arguments": [{"text": "self - attention mechanism", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["self", "-", "attention", "mechanism"], "offsets": [114, 115, 116, 117]}, {"text": "syntactical learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["syntactical", "learning"], "offsets": [119, 120]}], "trigger": {"text": "employs", "tokens": ["employs"], "offsets": [112]}}, {"event_type": "MDS", "arguments": [{"text": "enhance", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhance"], "offsets": [148]}, {"text": "part - of - speech embeddings", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["part", "-", "of", "-", "speech", "embeddings"], "offsets": [124, 125, 126, 127, 128, 129]}, {"text": "dependency - based embeddings", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["dependency", "-", "based", "embeddings"], "offsets": [131, 132, 133, 134]}, {"text": "contextualized embeddings", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["contextualized", "embeddings"], "offsets": [136, 137]}], "trigger": {"text": "combine", "tokens": ["combine"], 
"offsets": [123]}}, {"event_type": "PUR", "arguments": [{"text": "performance of the aspect extractor", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance", "of", "the", "aspect", "extractor"], "offsets": [150, 151, 152, 153, 154]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [148]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [156]}, {"text": "syntactic relative distance", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["syntactic", "relative", "distance"], "offsets": [160, 161, 162]}, {"text": "de - emphasize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["de", "-", "emphasize"], "offsets": [164, 165, 166]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [158]}}, {"event_type": "PUR", "arguments": [{"text": "adverse effects of unrelated words", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["adverse", "effects", "of", "unrelated", "words"], "offsets": [168, 169, 170, 171, 172]}], "trigger": {"text": "de - emphasize", "tokens": ["de", "-", "emphasize"], "offsets": [164, 165, 166]}}, {"event_type": "CMP", "arguments": [{"text": "state - of - the - art models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [197, 198, 199, 200, 201, 202, 203, 204]}, {"text": "semeval - 2014 dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["semeval", "-", "2014", "dataset"], "offsets": [206, 207, 208, 209]}, {"text": "end - to - end absa solution", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["end", "-", "to", "-", "end", "absa", "solution"], "offsets": [35, 36, 37, 38, 39, 40, 41]}, {"text": "in both two subtasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "both", "two", "subtasks"], "offsets": [210, 211, 212, 213]}], "trigger": {"text": "outperform", "tokens": 
["outperform"], "offsets": [195]}}, {"event_type": "FAC", "arguments": [{"text": "accuracy of the aspect sentiment classifier", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["accuracy", "of", "the", "aspect", "sentiment", "classifier"], "offsets": [186, 187, 188, 189, 190, 191]}], "trigger": {"text": "increases", "tokens": ["increases"], "offsets": [184]}}], "document": ["the", "aspect", "-", "based", "sentiment", "analysis", "(", "absa", ")", "consists", "of", "two", "conceptual", "tasks", ",", "namely", "an", "aspect", "extraction", "and", "an", "aspect", "sentiment", "classification", ".", "rather", "than", "considering", "the", "tasks", "separately", ",", "we", "build", "an", "end", "-", "to", "-", "end", "absa", "solution", ".", "previous", "works", "in", "absa", "tasks", "did", "not", "fully", "leverage", "the", "importance", "of", "syntactical", "information", ".", "hence", ",", "the", "aspect", "extraction", "model", "often", "failed", "to", "detect", "the", "boundaries", "of", "multi", "-", "word", "aspect", "terms", ".", "on", "the", "other", "hand", ",", "the", "aspect", "sentiment", "classifier", "was", "unable", "to", "account", "for", "the", "syntactical", "correlation", "between", "aspect", "terms", "and", "the", "context", "words", ".", "this", "paper", "explores", "the", "grammatical", "aspect", "of", "the", "sentence", "and", "employs", "the", "self", "-", "attention", "mechanism", "for", "syntactical", "learning", ".", "we", "combine", "part", "-", "of", "-", "speech", "embeddings", ",", "dependency", "-", "based", "embeddings", "and", "contextualized", "embeddings", "(", "e", ".", "g", ".", "bert", ",", "roberta", ")", "to", "enhance", "the", "performance", "of", "the", "aspect", "extractor", ".", "we", "also", "propose", "the", "syntactic", "relative", "distance", "to", "de", "-", "emphasize", "the", "adverse", "effects", "of", "unrelated", "words", ",", "having", "weak", "syntactic", "connection", "with", "the", "aspect", 
"terms", ".", "this", "increases", "the", "accuracy", "of", "the", "aspect", "sentiment", "classifier", ".", "our", "solutions", "outperform", "the", "state", "-", "of", "-", "the", "-", "art", "models", "on", "semeval", "-", "2014", "dataset", "in", "both", "two", "subtasks", "."]}, {"venue": "ACL", "title": "Show, Describe and Conclude: On Exploiting the Structure Information of Chest X-ray Reports", "abstract": "Chest X-Ray (CXR) images are commonly used for clinical screening and diagnosis. Automatically writing reports for these images can considerably lighten the workload of radiologists for summarizing descriptive findings and conclusive impressions. The complex structures between and within sections of the reports pose a great challenge to the automatic report generation. Specifically, the section Impression is a diagnostic summarization over the section Findings; and the appearance of normality dominates each section over that of abnormality. Existing studies rarely explore and consider this fundamental structure information. In this work, we propose a novel framework which exploits the structure information between and within report sections for generating CXR imaging reports. First, we propose a two-stage strategy that explicitly models the relationship between Findings and Impression. Second, we design a novel co-operative multi-agent system that implicitly captures the imbalanced distribution between abnormality and normality. Experiments on two CXR report datasets show that our method achieves state-of-the-art performance in terms of various evaluation metrics. 
Our results expose that the proposed approach is able to generate high-quality medical reports through integrating the structure information.", "doc_id": "078b73a788649b061a84ee2d67fa56dd", "publication_year": 2019, "sentences": ["chest x - ray ( cxr ) images are commonly used for clinical screening and diagnosis .", "automatically writing reports for these images can considerably lighten the workload of radiologists for summarizing descriptive findings and conclusive impressions .", "the complex structures between and within sections of the reports pose a great challenge to the automatic report generation .", "specifically , the section impression is a diagnostic summarization over the section findings ; and the appearance of normality dominates each section over that of abnormality .", "existing studies rarely explore and consider this fundamental structure information .", "in this work , we propose a novel framework which exploits the structure information between and within report sections for generating cxr imaging reports .", "first , we propose a two - stage strategy that explicitly models the relationship between findings and impression .", "second , we design a novel co - operative multi - agent system that implicitly captures the imbalanced distribution between abnormality and normality .", "experiments on two cxr report datasets show that our method achieves state - of - the - art performance in terms of various evaluation metrics .", "our results expose that the proposed approach is able to generate high - quality medical reports through integrating the structure information ."], "events": [{"event_type": "FAC", "arguments": [{"text": "two cxr report datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "cxr", "report", "datasets"], "offsets": [166, 167, 168, 169]}, {"text": "framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["framework"], "offsets": [104]}, {"text": "state - of - the - art performance", 
"nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [175, 176, 177, 178, 179, 180, 181, 182]}, {"text": "in terms of various evaluation metrics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "terms", "of", "various", "evaluation", "metrics"], "offsets": [183, 184, 185, 186, 187, 188]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [174]}}], "document": ["chest", "x", "-", "ray", "(", "cxr", ")", "images", "are", "commonly", "used", "for", "clinical", "screening", "and", "diagnosis", ".", "automatically", "writing", "reports", "for", "these", "images", "can", "considerably", "lighten", "the", "workload", "of", "radiologists", "for", "summarizing", "descriptive", "findings", "and", "conclusive", "impressions", ".", "the", "complex", "structures", "between", "and", "within", "sections", "of", "the", "reports", "pose", "a", "great", "challenge", "to", "the", "automatic", "report", "generation", ".", "specifically", ",", "the", "section", "impression", "is", "a", "diagnostic", "summarization", "over", "the", "section", "findings", ";", "and", "the", "appearance", "of", "normality", "dominates", "each", "section", "over", "that", "of", "abnormality", ".", "existing", "studies", "rarely", "explore", "and", "consider", "this", "fundamental", "structure", "information", ".", "in", "this", "work", ",", "we", "propose", "a", "novel", "framework", "which", "exploits", "the", "structure", "information", "between", "and", "within", "report", "sections", "for", "generating", "cxr", "imaging", "reports", ".", "first", ",", "we", "propose", "a", "two", "-", "stage", "strategy", "that", "explicitly", "models", "the", "relationship", "between", "findings", "and", "impression", ".", "second", ",", "we", "design", "a", "novel", "co", "-", "operative", "multi", "-", "agent", "system", "that", "implicitly", "captures", "the", "imbalanced", "distribution", 
"between", "abnormality", "and", "normality", ".", "experiments", "on", "two", "cxr", "report", "datasets", "show", "that", "our", "method", "achieves", "state", "-", "of", "-", "the", "-", "art", "performance", "in", "terms", "of", "various", "evaluation", "metrics", ".", "our", "results", "expose", "that", "the", "proposed", "approach", "is", "able", "to", "generate", "high", "-", "quality", "medical", "reports", "through", "integrating", "the", "structure", "information", "."]}, {"venue": "ACL", "title": "Towards Generative Aspect-Based Sentiment Analysis", "abstract": "Aspect-based sentiment analysis (ABSA) has received increasing attention recently. Most existing work tackles ABSA in a discriminative manner, designing various task-specific classification networks for the prediction. Despite their effectiveness, these methods ignore the rich label semantics in ABSA problems and require extensive task-specific designs. In this paper, we propose to tackle various ABSA tasks in a unified generative framework. Two types of paradigms, namely annotation-style and extraction-style modeling, are designed to enable the training process by formulating each ABSA task as a text generation problem. We conduct experiments on four ABSA tasks across multiple benchmark datasets where our proposed generative approach achieves new state-of-the-art results in almost all cases. 
This also validates the strong generality of the proposed framework which can be easily adapted to arbitrary ABSA task without additional task-specific model design.", "doc_id": "296c78ead71847805103df1854299499", "publication_year": 2021, "sentences": ["aspect - based sentiment analysis ( absa ) has received increasing attention recently .", "most existing work tackles absa in a discriminative manner , designing various task - specific classification networks for the prediction .", "despite their effectiveness , these methods ignore the rich label semantics in absa problems and require extensive task - specific designs .", "in this paper , we propose to tackle various absa tasks in a unified generative framework .", "two types of paradigms , namely annotation - style and extraction - style modeling , are designed to enable the training process by formulating each absa task as a text generation problem .", "we conduct experiments on four absa tasks across multiple benchmark datasets where our proposed generative approach achieves new state - of - the - art results in almost all cases .", "this also validates the strong generality of the proposed framework which can be easily adapted to arbitrary absa task without additional task - specific model design ."], "events": [{"event_type": "ITT", "arguments": [{"text": "aspect - based sentiment analysis", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["aspect", "-", "based", "sentiment", "analysis"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "received", "tokens": ["received"], "offsets": [9]}}, {"event_type": "RWF", "arguments": [{"text": "ignore", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ignore"], "offsets": [41]}, {"text": "existing work", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "work"], "offsets": [15, 16]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [41]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": 
"OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [61]}, {"text": "various absa tasks", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["various", "absa", "tasks"], "offsets": [65, 66, 67]}, {"text": "in a unified generative framework", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "unified", "generative", "framework"], "offsets": [68, 69, 70, 71, 72]}], "trigger": {"text": "tackle", "tokens": ["tackle"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "annotation - style and extraction - style modeling", "nugget_type": "APP", "argument_type": "Content", "tokens": ["annotation", "-", "style", "and", "extraction", "-", "style", "modeling"], "offsets": [80, 81, 82, 83, 84, 85, 86, 87]}, {"text": "enable", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enable"], "offsets": [92]}], "trigger": {"text": "designed", "tokens": ["designed"], "offsets": [90]}}, {"event_type": "PUR", "arguments": [{"text": "training process", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["training", "process"], "offsets": [94, 95]}], "trigger": {"text": "enable", "tokens": ["enable"], "offsets": [92]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [107]}, {"text": "multiple benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multiple", "benchmark", "datasets"], "offsets": [115, 116, 117]}, {"text": "experiments", "nugget_type": "APP", "argument_type": "Content", "tokens": ["experiments"], "offsets": [109]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [108]}}, {"event_type": "FAC", "arguments": [{"text": "strong generality", "nugget_type": "STR", "argument_type": "Object", "tokens": ["strong", "generality"], "offsets": [142, 143]}], "trigger": {"text": "validates", "tokens": ["validates"], "offsets": [140]}}, {"event_type": "RWS", "arguments": [{"text": 
"most existing work", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["most", "existing", "work"], "offsets": [14, 15, 16]}, {"text": "aspect - based sentiment analysis", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["aspect", "-", "based", "sentiment", "analysis"], "offsets": [0, 1, 2, 3, 4]}, {"text": "various task - specific classification networks", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["various", "task", "-", "specific", "classification", "networks"], "offsets": [25, 26, 27, 28, 29, 30]}], "trigger": {"text": "designing", "tokens": ["designing"], "offsets": [24]}}], "document": ["aspect", "-", "based", "sentiment", "analysis", "(", "absa", ")", "has", "received", "increasing", "attention", "recently", ".", "most", "existing", "work", "tackles", "absa", "in", "a", "discriminative", "manner", ",", "designing", "various", "task", "-", "specific", "classification", "networks", "for", "the", "prediction", ".", "despite", "their", "effectiveness", ",", "these", "methods", "ignore", "the", "rich", "label", "semantics", "in", "absa", "problems", "and", "require", "extensive", "task", "-", "specific", "designs", ".", "in", "this", "paper", ",", "we", "propose", "to", "tackle", "various", "absa", "tasks", "in", "a", "unified", "generative", "framework", ".", "two", "types", "of", "paradigms", ",", "namely", "annotation", "-", "style", "and", "extraction", "-", "style", "modeling", ",", "are", "designed", "to", "enable", "the", "training", "process", "by", "formulating", "each", "absa", "task", "as", "a", "text", "generation", "problem", ".", "we", "conduct", "experiments", "on", "four", "absa", "tasks", "across", "multiple", "benchmark", "datasets", "where", "our", "proposed", "generative", "approach", "achieves", "new", "state", "-", "of", "-", "the", "-", "art", "results", "in", "almost", "all", "cases", ".", "this", "also", "validates", "the", "strong", "generality", "of", "the", "proposed", "framework", 
"which", "can", "be", "easily", "adapted", "to", "arbitrary", "absa", "task", "without", "additional", "task", "-", "specific", "model", "design", "."]}, {"venue": "ACL", "title": "Attend What You Need: Motion-Appearance Synergistic Networks for Video Question Answering", "abstract": "Video Question Answering is a task which requires an AI agent to answer questions grounded in video. This task entails three key challenges: (1) understand the intention of various questions, (2) capturing various elements of the input video (e.g., object, action, causality), and (3) cross-modal grounding between language and vision information. We propose Motion-Appearance Synergistic Networks (MASN), which embed two cross-modal features grounded on motion and appearance information and selectively utilize them depending on the question\u2019s intentions. MASN consists of a motion module, an appearance module, and a motion-appearance fusion module. The motion module computes the action-oriented cross-modal joint representations, while the appearance module focuses on the appearance aspect of the input video. Finally, the motion-appearance fusion module takes each output of the motion module and the appearance module as input, and performs question-guided fusion. As a result, MASN achieves new state-of-the-art performance on the TGIF-QA and MSVD-QA datasets. We also conduct qualitative analysis by visualizing the inference results of MASN.", "doc_id": "255a7a6f89a3ca67009a4d371124ad86", "publication_year": 2021, "sentences": ["video question answering is a task which requires an ai agent to answer questions grounded in video .", "this task entails three key challenges : ( 1 ) understand the intention of various questions , ( 2 ) capturing various elements of the input video ( e . g . 
, object , action , causality ) , and ( 3 ) cross - modal grounding between language and vision information .", "we propose motion - appearance synergistic networks ( masn ) , which embed two cross - modal features grounded on motion and appearance information and selectively utilize them depending on the question \u2019 s intentions .", "masn consists of a motion module , an appearance module , and a motion - appearance fusion module .", "the motion module computes the action - oriented cross - modal joint representations , while the appearance module focuses on the appearance aspect of the input video .", "finally , the motion - appearance fusion module takes each output of the motion module and the appearance module as input , and performs question - guided fusion .", "as a result , masn achieves new state - of - the - art performance on the tgif - qa and msvd - qa datasets .", "we also conduct qualitative analysis by visualizing the inference results of masn ."], "events": [{"event_type": "ITT", "arguments": [{"text": "video question answering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["video", "question", "answering"], "offsets": [0, 1, 2]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [5]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [72]}, {"text": "motion - appearance synergistic networks", "nugget_type": "APP", "argument_type": "Content", "tokens": ["motion", "-", "appearance", "synergistic", "networks"], "offsets": [74, 75, 76, 77, 78]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [73]}}, {"event_type": "MDS", "arguments": [{"text": "two cross - modal features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["two", "cross", "-", "modal", "features"], "offsets": [85, 86, 87, 88, 89]}, {"text": "depending on the question \u2019 s intentions", "nugget_type": "LIM", "argument_type": "Condition", 
"tokens": ["depending", "on", "the", "question", "\u2019", "s", "intentions"], "offsets": [100, 101, 102, 103, 104, 105, 106]}], "trigger": {"text": "selectively utilize", "tokens": ["selectively", "utilize"], "offsets": [97, 98]}}, {"event_type": "FAC", "arguments": [{"text": "motion - appearance synergistic networks", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["motion", "-", "appearance", "synergistic", "networks"], "offsets": [74, 75, 76, 77, 78]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [191, 192, 193, 194, 195, 196, 197, 198]}, {"text": "tgif - qa", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["tgif", "-", "qa"], "offsets": [201, 202, 203]}, {"text": "msvd - qa", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["msvd", "-", "qa"], "offsets": [205, 206, 207]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [189]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [210]}, {"text": "qualitative analysis", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["qualitative", "analysis"], "offsets": [213, 214]}, {"text": "inference results of masn", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["inference", "results", "of", "masn"], "offsets": [218, 219, 220, 221]}], "trigger": {"text": "visualizing", "tokens": ["visualizing"], "offsets": [216]}}, {"event_type": "WKS", "arguments": [{"text": "motion module", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["motion", "module"], "offsets": [112, 113]}, {"text": "appearance module", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["appearance", "module"], "offsets": [116, 117]}, {"text": "motion - appearance fusion module", "nugget_type": "MOD", "argument_type": "Content", "tokens": 
["motion", "-", "appearance", "fusion", "module"], "offsets": [121, 122, 123, 124, 125]}], "trigger": {"text": "consists", "tokens": ["consists"], "offsets": [109]}}, {"event_type": "MDS", "arguments": [{"text": "action - oriented cross - modal joint representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["action", "-", "oriented", "cross", "-", "modal", "joint", "representations"], "offsets": [132, 133, 134, 135, 136, 137, 138, 139]}, {"text": "motion module", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["motion", "module"], "offsets": [128, 129]}], "trigger": {"text": "computes", "tokens": ["computes"], "offsets": [130]}}, {"event_type": "WKS", "arguments": [{"text": "appearance module", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["appearance", "module"], "offsets": [143, 144]}, {"text": "appearance aspect of the input video", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["appearance", "aspect", "of", "the", "input", "video"], "offsets": [148, 149, 150, 151, 152, 153]}], "trigger": {"text": "focuses", "tokens": ["focuses"], "offsets": [145]}}, {"event_type": "MDS", "arguments": [{"text": "motion - appearance fusion module", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["motion", "-", "appearance", "fusion", "module"], "offsets": [158, 159, 160, 161, 162]}, {"text": "performs", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["performs"], "offsets": [178]}, {"text": "each output of", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "output", "of"], "offsets": [164, 165, 166]}, {"text": "output of the motion module", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["output", "of", "the", "motion", "module"], "offsets": [165, 166, 167, 168, 169]}, {"text": "output of the appearance module", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["output", "of", "the", "appearance", "module"], 
"offsets": [165, 166, 171, 172, 173]}], "trigger": {"text": "takes", "tokens": ["takes"], "offsets": [163]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [72]}, {"text": "two cross - modal features", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["two", "cross", "-", "modal", "features"], "offsets": [85, 86, 87, 88, 89]}, {"text": "grounded on motion and appearance information", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["grounded", "on", "motion", "and", "appearance", "information"], "offsets": [90, 91, 92, 93, 94, 95]}], "trigger": {"text": "embed", "tokens": ["embed"], "offsets": [84]}}, {"event_type": "PUR", "arguments": [{"text": "question - guided fusion", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["question", "-", "guided", "fusion"], "offsets": [179, 180, 181, 182]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [178]}}], "document": ["video", "question", "answering", "is", "a", "task", "which", "requires", "an", "ai", "agent", "to", "answer", "questions", "grounded", "in", "video", ".", "this", "task", "entails", "three", "key", "challenges", ":", "(", "1", ")", "understand", "the", "intention", "of", "various", "questions", ",", "(", "2", ")", "capturing", "various", "elements", "of", "the", "input", "video", "(", "e", ".", "g", ".", ",", "object", ",", "action", ",", "causality", ")", ",", "and", "(", "3", ")", "cross", "-", "modal", "grounding", "between", "language", "and", "vision", "information", ".", "we", "propose", "motion", "-", "appearance", "synergistic", "networks", "(", "masn", ")", ",", "which", "embed", "two", "cross", "-", "modal", "features", "grounded", "on", "motion", "and", "appearance", "information", "and", "selectively", "utilize", "them", "depending", "on", "the", "question", "\u2019", "s", "intentions", ".", "masn", "consists", "of", "a", "motion", "module", ",", "an", 
"appearance", "module", ",", "and", "a", "motion", "-", "appearance", "fusion", "module", ".", "the", "motion", "module", "computes", "the", "action", "-", "oriented", "cross", "-", "modal", "joint", "representations", ",", "while", "the", "appearance", "module", "focuses", "on", "the", "appearance", "aspect", "of", "the", "input", "video", ".", "finally", ",", "the", "motion", "-", "appearance", "fusion", "module", "takes", "each", "output", "of", "the", "motion", "module", "and", "the", "appearance", "module", "as", "input", ",", "and", "performs", "question", "-", "guided", "fusion", ".", "as", "a", "result", ",", "masn", "achieves", "new", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "the", "tgif", "-", "qa", "and", "msvd", "-", "qa", "datasets", ".", "we", "also", "conduct", "qualitative", "analysis", "by", "visualizing", "the", "inference", "results", "of", "masn", "."]}, {"venue": "ACL", "title": "Sentence Centrality Revisited for Unsupervised Summarization", "abstract": "Single document summarization has enjoyed renewed interest in recent years thanks to the popularity of neural network models and the availability of large-scale datasets. In this paper we develop an unsupervised approach arguing that it is unrealistic to expect large-scale and high-quality training data to be available or created for different types of summaries, domains, or languages. We revisit a popular graph-based ranking algorithm and modify how node (aka sentence) centrality is computed in two ways: (a) we employ BERT, a state-of-the-art neural representation learning model to better capture sentential meaning and (b) we build graphs with directed edges arguing that the contribution of any two nodes to their respective centrality is influenced by their relative position in a document. 
Experimental results on three news summarization datasets representative of different languages and writing styles show that our approach outperforms strong baselines by a wide margin.", "doc_id": "4cdc92b7f8c35f0b6ccc9c8dcd9763cf", "publication_year": 2019, "sentences": ["single document summarization has enjoyed renewed interest in recent years thanks to the popularity of neural network models and the availability of large - scale datasets .", "in this paper we develop an unsupervised approach arguing that it is unrealistic to expect large - scale and high - quality training data to be available or created for different types of summaries , domains , or languages .", "we revisit a popular graph - based ranking algorithm and modify how node ( aka sentence ) centrality is computed in two ways : ( a ) we employ bert , a state - of - the - art neural representation learning model to better capture sentential meaning and ( b ) we build graphs with directed edges arguing that the contribution of any two nodes to their respective centrality is influenced by their relative position in a document .", "experimental results on three news summarization datasets representative of different languages and writing styles show that our approach outperforms strong baselines by a wide margin ."], "events": [{"event_type": "ITT", "arguments": [{"text": "single document summarization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["single", "document", "summarization"], "offsets": [0, 1, 2]}], "trigger": {"text": "enjoyed", "tokens": ["enjoyed"], "offsets": [4]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [30]}, {"text": "unsupervised approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unsupervised", "approach"], "offsets": [33, 34]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [31]}}, {"event_type": "WKS", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [94]}, {"text": "bert", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bert"], "offsets": [96]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [112]}], "trigger": {"text": "employ", "tokens": ["employ"], "offsets": [95]}}, {"event_type": "PUR", "arguments": [{"text": "sentential meaning", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["sentential", "meaning"], "offsets": [113, 114]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [112]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [119]}, {"text": "graphs with directed edges", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["graphs", "with", "directed", "edges"], "offsets": [121, 122, 123, 124]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [120]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [165]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [161]}}, {"event_type": "CMP", "arguments": [{"text": "unsupervised approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unsupervised", "approach"], "offsets": [33, 34]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [165]}, {"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [166, 167]}, {"text": "by a wide margin", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["by", "a", "wide", "margin"], "offsets": [168, 169, 170, 171]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [165]}}], "document": ["single", "document", "summarization", "has", "enjoyed", "renewed", 
"interest", "in", "recent", "years", "thanks", "to", "the", "popularity", "of", "neural", "network", "models", "and", "the", "availability", "of", "large", "-", "scale", "datasets", ".", "in", "this", "paper", "we", "develop", "an", "unsupervised", "approach", "arguing", "that", "it", "is", "unrealistic", "to", "expect", "large", "-", "scale", "and", "high", "-", "quality", "training", "data", "to", "be", "available", "or", "created", "for", "different", "types", "of", "summaries", ",", "domains", ",", "or", "languages", ".", "we", "revisit", "a", "popular", "graph", "-", "based", "ranking", "algorithm", "and", "modify", "how", "node", "(", "aka", "sentence", ")", "centrality", "is", "computed", "in", "two", "ways", ":", "(", "a", ")", "we", "employ", "bert", ",", "a", "state", "-", "of", "-", "the", "-", "art", "neural", "representation", "learning", "model", "to", "better", "capture", "sentential", "meaning", "and", "(", "b", ")", "we", "build", "graphs", "with", "directed", "edges", "arguing", "that", "the", "contribution", "of", "any", "two", "nodes", "to", "their", "respective", "centrality", "is", "influenced", "by", "their", "relative", "position", "in", "a", "document", ".", "experimental", "results", "on", "three", "news", "summarization", "datasets", "representative", "of", "different", "languages", "and", "writing", "styles", "show", "that", "our", "approach", "outperforms", "strong", "baselines", "by", "a", "wide", "margin", "."]}, {"venue": "ACL", "title": "Are VQA Systems RAD? Measuring Robustness to Augmented Data with Focused Interventions", "abstract": "Deep learning algorithms have shown promising results in visual question answering (VQA) tasks, but a more careful look reveals that they often do not understand the rich signal they are being fed with. To understand and better measure the generalization capabilities of VQA systems, we look at their robustness to counterfactually augmented data. 
Our proposed augmentations are designed to make a focused intervention on a specific property of the question such that the answer changes. Using these augmentations, we propose a new robustness measure, Robustness to Augmented Data (RAD), which measures the consistency of model predictions between original and augmented examples. Through extensive experimentation, we show that RAD, unlike classical accuracy measures, can quantify when state-of-the-art systems are not robust to counterfactuals. We find substantial failure cases which reveal that current VQA systems are still brittle. Finally, we connect between robustness and generalization, demonstrating the predictive power of RAD for performance on unseen augmentations.", "doc_id": "05931f213d055ec93f928dbdff1e3d81", "publication_year": 2021, "sentences": ["deep learning algorithms have shown promising results in visual question answering ( vqa ) tasks , but a more careful look reveals that they often do not understand the rich signal they are being fed with .", "to understand and better measure the generalization capabilities of vqa systems , we look at their robustness to counterfactually augmented data .", "our proposed augmentations are designed to make a focused intervention on a specific property of the question such that the answer changes .", "using these augmentations , we propose a new robustness measure , robustness to augmented data ( rad ) , which measures the consistency of model predictions between original and augmented examples .", "through extensive experimentation , we show that rad , unlike classical accuracy measures , can quantify when state - of - the - art systems are not robust to counterfactuals .", "we find substantial failure cases which reveal that current vqa systems are still brittle .", "finally , we connect between robustness and generalization , demonstrating the predictive power of rad for performance on unseen augmentations ."], "events": [{"event_type": "ITT", "arguments": 
[{"text": "deep learning algorithms", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["deep", "learning", "algorithms"], "offsets": [0, 1, 2]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "deep learning algorithms", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["deep", "learning", "algorithms"], "offsets": [0, 1, 2]}, {"text": "often", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["often"], "offsets": [24]}, {"text": "not understand", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "understand"], "offsets": [26, 27]}], "trigger": {"text": "not understand", "tokens": ["not", "understand"], "offsets": [26, 27]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [49]}, {"text": "counterfactually augmented data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["counterfactually", "augmented", "data"], "offsets": [55, 56, 57]}, {"text": "their robustness", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["visual", "question", "answering", "systems", "robustness"], "offsets": [8, 9, 10, 47, 53]}, {"text": "understand and better measure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["understand", "and", "better", "measure"], "offsets": [38, 39, 40, 41]}], "trigger": {"text": "look", "tokens": ["look"], "offsets": [50]}}, {"event_type": "PUR", "arguments": [{"text": "generalization capabilities of vqa systems", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["generalization", "capabilities", "of", "visual", "question", "answering", "systems"], "offsets": [43, 44, 45, 8, 9, 10, 47]}], "trigger": {"text": "understand and better measure", "tokens": ["understand", "and", "better", "measure"], "offsets": [38, 39, 40, 41]}}, {"event_type": "WKS", "arguments": [{"text": "specific property of the question", "nugget_type": 
"TAK", "argument_type": "Content", "tokens": ["specific", "property", "of", "the", "question"], "offsets": [71, 72, 73, 74, 75]}, {"text": "changes", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["changes"], "offsets": [80]}], "trigger": {"text": "make a focused intervention", "tokens": ["make", "a", "focused", "intervention"], "offsets": [65, 66, 67, 68]}}, {"event_type": "PUR", "arguments": [{"text": "answer", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["answer"], "offsets": [79]}], "trigger": {"text": "changes", "tokens": ["changes"], "offsets": [80]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [86]}, {"text": "robustness measure", "nugget_type": "APP", "argument_type": "Content", "tokens": ["robustness", "measure"], "offsets": [90, 91]}, {"text": "robustness to augmented data", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["robustness", "to", "augmented", "data"], "offsets": [93, 94, 95, 96]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [87]}}, {"event_type": "WKS", "arguments": [{"text": "consistency of model predictions", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["consistency", "of", "model", "predictions"], "offsets": [104, 105, 106, 107]}, {"text": "between original and augmented examples", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "original", "and", "augmented", "examples"], "offsets": [108, 109, 110, 111, 112]}], "trigger": {"text": "measures", "tokens": ["measures"], "offsets": [102]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [118]}, {"text": "can quantify", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["can", "quantify"], "offsets": [128, 129]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [119]}}, {"event_type": "FAC", "arguments": 
[{"text": "robustness to augmented data", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["robustness", "to", "augmented", "data"], "offsets": [93, 94, 95, 96]}, {"text": "when state - of - the - art systems are not robust to counterfactuals", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "state", "-", "of", "-", "the", "-", "art", "systems", "are", "not", "robust", "to", "counterfactuals"], "offsets": [130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143]}], "trigger": {"text": "can quantify", "tokens": ["can", "quantify"], "offsets": [128, 129]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [145]}, {"text": "brittle", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["brittle"], "offsets": [158]}], "trigger": {"text": "reveal", "tokens": ["reveal"], "offsets": [151]}}, {"event_type": "FAC", "arguments": [{"text": "current vqa systems", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["current", "visual", "question", "answering", "systems"], "offsets": [153, 8, 9, 10, 155]}], "trigger": {"text": "brittle", "tokens": ["brittle"], "offsets": [158]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [162]}, {"text": "robustness", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["robustness"], "offsets": [165]}, {"text": "generalization", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["generalization"], "offsets": [167]}], "trigger": {"text": "connect", "tokens": ["connect"], "offsets": [163]}}, {"event_type": "FAC", "arguments": [{"text": "on unseen augmentations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "unseen", "augmentations"], "offsets": [177, 178, 179]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance"], "offsets": 
[176]}, {"text": "predictive power of rad", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["predictive", "power", "of", "robustness", "to", "augmented", "data"], "offsets": [171, 172, 173, 93, 94, 95, 96]}], "trigger": {"text": "demonstrating", "tokens": ["demonstrating"], "offsets": [169]}}], "document": ["deep", "learning", "algorithms", "have", "shown", "promising", "results", "in", "visual", "question", "answering", "(", "vqa", ")", "tasks", ",", "but", "a", "more", "careful", "look", "reveals", "that", "they", "often", "do", "not", "understand", "the", "rich", "signal", "they", "are", "being", "fed", "with", ".", "to", "understand", "and", "better", "measure", "the", "generalization", "capabilities", "of", "vqa", "systems", ",", "we", "look", "at", "their", "robustness", "to", "counterfactually", "augmented", "data", ".", "our", "proposed", "augmentations", "are", "designed", "to", "make", "a", "focused", "intervention", "on", "a", "specific", "property", "of", "the", "question", "such", "that", "the", "answer", "changes", ".", "using", "these", "augmentations", ",", "we", "propose", "a", "new", "robustness", "measure", ",", "robustness", "to", "augmented", "data", "(", "rad", ")", ",", "which", "measures", "the", "consistency", "of", "model", "predictions", "between", "original", "and", "augmented", "examples", ".", "through", "extensive", "experimentation", ",", "we", "show", "that", "rad", ",", "unlike", "classical", "accuracy", "measures", ",", "can", "quantify", "when", "state", "-", "of", "-", "the", "-", "art", "systems", "are", "not", "robust", "to", "counterfactuals", ".", "we", "find", "substantial", "failure", "cases", "which", "reveal", "that", "current", "vqa", "systems", "are", "still", "brittle", ".", "finally", ",", "we", "connect", "between", "robustness", "and", "generalization", ",", "demonstrating", "the", "predictive", "power", "of", "rad", "for", "performance", "on", "unseen", "augmentations", "."]}, {"venue": "ACL", "title": 
"Mind Your Outliers! Investigating the Negative Impact of Outliers on Active Learning for Visual Question Answering", "abstract": "Active learning promises to alleviate the massive data needs of supervised machine learning: it has successfully improved sample efficiency by an order of magnitude on traditional tasks like topic classification and object recognition. However, we uncover a striking contrast to this promise: across 5 models and 4 datasets on the task of visual question answering, a wide variety of active learning approaches fail to outperform random selection. To understand this discrepancy, we profile 8 active learning methods on a per-example basis, and identify the problem as collective outliers \u2013 groups of examples that active learning methods prefer to acquire but models fail to learn (e.g., questions that ask about text in images or require external knowledge). Through systematic ablation experiments and qualitative visualizations, we verify that collective outliers are a general phenomenon responsible for degrading pool-based active learning. Notably, we show that active learning sample efficiency increases significantly as the number of collective outliers in the active learning pool decreases. 
We conclude with a discussion and prescriptive recommendations for mitigating the effects of these outliers in future work.", "doc_id": "9355beaa3f455da86d368025274566c9", "publication_year": 2021, "sentences": ["active learning promises to alleviate the massive data needs of supervised machine learning : it has successfully improved sample efficiency by an order of magnitude on traditional tasks like topic classification and object recognition .", "however , we uncover a striking contrast to this promise : across 5 models and 4 datasets on the task of visual question answering , a wide variety of active learning approaches fail to outperform random selection .", "to understand this discrepancy , we profile 8 active learning methods on a per - example basis , and identify the problem as collective outliers \u2013 groups of examples that active learning methods prefer to acquire but models fail to learn ( e . g . , questions that ask about text in images or require external knowledge ) .", "through systematic ablation experiments and qualitative visualizations , we verify that collective outliers are a general phenomenon responsible for degrading pool - based active learning .", "notably , we show that active learning sample efficiency increases significantly as the number of collective outliers in the active learning pool decreases .", "we conclude with a discussion and prescriptive recommendations for mitigating the effects of these outliers in future work ."], "events": [{"event_type": "ITT", "arguments": [{"text": "supervised machine learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["supervised", "machine", "learning"], "offsets": [10, 11, 12]}], "trigger": {"text": "alleviate", "tokens": ["alleviate"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "fail", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["fail"], "offsets": [67]}, {"text": "active learning approaches", "nugget_type": "APP", "argument_type": 
"Concern", "tokens": ["active", "learning", "approaches"], "offsets": [64, 65, 66]}, {"text": "across 5 models and 4 datasets on the task of visual question answering", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "5", "models", "and", "4", "datasets", "on", "the", "task", "of", "visual", "question", "answering"], "offsets": [46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58]}], "trigger": {"text": "fail", "tokens": ["fail"], "offsets": [67]}}, {"event_type": "WKS", "arguments": [{"text": "understand", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["understand"], "offsets": [74]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [78]}, {"text": "8 active learning methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["8", "active", "learning", "methods"], "offsets": [80, 81, 82, 83]}, {"text": "on a per - example basis", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "per", "-", "example", "basis"], "offsets": [84, 85, 86, 87, 88, 89]}], "trigger": {"text": "profile", "tokens": ["profile"], "offsets": [79]}}, {"event_type": "PUR", "arguments": [{"text": "discrepancy", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["discrepancy"], "offsets": [76]}], "trigger": {"text": "understand", "tokens": ["understand"], "offsets": [74]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [78]}, {"text": "collective outliers", "nugget_type": "APP", "argument_type": "Content", "tokens": ["collective", "outliers"], "offsets": [96, 97]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [92]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [141]}, {"text": "general phenomenon responsible", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["general", 
"phenomenon", "responsible"], "offsets": [148, 149, 150]}], "trigger": {"text": "verify", "tokens": ["verify"], "offsets": [142]}}, {"event_type": "FAC", "arguments": [{"text": "collective outliers", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["collective", "outliers"], "offsets": [144, 145]}, {"text": "degrading", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["degrading"], "offsets": [152]}, {"text": "through systematic ablation experiments and qualitative visualizations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "systematic", "ablation", "experiments", "and", "qualitative", "visualizations"], "offsets": [133, 134, 135, 136, 137, 138, 139]}], "trigger": {"text": "general phenomenon responsible", "tokens": ["general", "phenomenon", "responsible"], "offsets": [148, 149, 150]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [161]}, {"text": "increases significantly", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["increases", "significantly"], "offsets": [168, 169]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [162]}}, {"event_type": "FAC", "arguments": [{"text": "active learning sample efficiency", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["active", "learning", "sample", "efficiency"], "offsets": [164, 165, 166, 167]}, {"text": "as the number of collective outliers in the active learning pool decreases", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "the", "number", "of", "collective", "outliers", "in", "the", "active", "learning", "pool", "decreases"], "offsets": [170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181]}], "trigger": {"text": "increases significantly", "tokens": ["increases", "significantly"], "offsets": [168, 169]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", 
"tokens": ["we"], "offsets": [183]}, {"text": "mitigating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["mitigating"], "offsets": [192]}, {"text": "discussion and prescriptive recommendations", "nugget_type": "APP", "argument_type": "Content", "tokens": ["discussion", "and", "prescriptive", "recommendations"], "offsets": [187, 188, 189, 190]}], "trigger": {"text": "conclude", "tokens": ["conclude"], "offsets": [184]}}, {"event_type": "PUR", "arguments": [{"text": "effects of these outliers", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["effects", "of", "these", "outliers"], "offsets": [194, 195, 196, 197]}], "trigger": {"text": "mitigating", "tokens": ["mitigating"], "offsets": [192]}}, {"event_type": "PUR", "arguments": [{"text": "random selection", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["random", "selection"], "offsets": [70, 71]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [69]}}, {"event_type": "PUR", "arguments": [{"text": "pool - based active learning", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["pool", "-", "based", "active", "learning"], "offsets": [153, 154, 155, 156, 157]}], "trigger": {"text": "degrading", "tokens": ["degrading"], "offsets": [152]}}], "document": ["active", "learning", "promises", "to", "alleviate", "the", "massive", "data", "needs", "of", "supervised", "machine", "learning", ":", "it", "has", "successfully", "improved", "sample", "efficiency", "by", "an", "order", "of", "magnitude", "on", "traditional", "tasks", "like", "topic", "classification", "and", "object", "recognition", ".", "however", ",", "we", "uncover", "a", "striking", "contrast", "to", "this", "promise", ":", "across", "5", "models", "and", "4", "datasets", "on", "the", "task", "of", "visual", "question", "answering", ",", "a", "wide", "variety", "of", "active", "learning", "approaches", "fail", "to", "outperform", "random", "selection", ".", "to", "understand", "this", 
"discrepancy", ",", "we", "profile", "8", "active", "learning", "methods", "on", "a", "per", "-", "example", "basis", ",", "and", "identify", "the", "problem", "as", "collective", "outliers", "\u2013", "groups", "of", "examples", "that", "active", "learning", "methods", "prefer", "to", "acquire", "but", "models", "fail", "to", "learn", "(", "e", ".", "g", ".", ",", "questions", "that", "ask", "about", "text", "in", "images", "or", "require", "external", "knowledge", ")", ".", "through", "systematic", "ablation", "experiments", "and", "qualitative", "visualizations", ",", "we", "verify", "that", "collective", "outliers", "are", "a", "general", "phenomenon", "responsible", "for", "degrading", "pool", "-", "based", "active", "learning", ".", "notably", ",", "we", "show", "that", "active", "learning", "sample", "efficiency", "increases", "significantly", "as", "the", "number", "of", "collective", "outliers", "in", "the", "active", "learning", "pool", "decreases", ".", "we", "conclude", "with", "a", "discussion", "and", "prescriptive", "recommendations", "for", "mitigating", "the", "effects", "of", "these", "outliers", "in", "future", "work", "."]}, {"venue": "ACL", "title": "Hierarchical Curriculum Learning for AMR Parsing", "abstract": "Abstract Meaning Representation (AMR) parsing aims to translate sentences to semantic representation with a hierarchical structure, and is recently empowered by pretrained sequence-to-sequence models. However, there exists a gap between their flat training objective (i.e., equally treats all output tokens) and the hierarchical AMR structure, which limits the model generalization. To bridge this gap, we propose a Hierarchical Curriculum Learning (HCL) framework with Structure-level (SC) and Instance-level Curricula (IC). SC switches progressively from core to detail AMR semantic elements while IC transits from structure-simple to -complex AMR instances during training. 
Through these two warming-up processes, HCL reduces the difficulty of learning complex structures, thus the flat model can better adapt to the AMR hierarchy. Extensive experiments on AMR2.0, AMR3.0, structure-complex and out-of-distribution situations verify the effectiveness of HCL.", "doc_id": "b7435098d58d86e0f8a339bc01fc0aea", "publication_year": 2022, "sentences": ["abstract meaning representation ( amr ) parsing aims to translate sentences to semantic representation with a hierarchical structure , and is recently empowered by pretrained sequence - to - sequence models .", "however , there exists a gap between their flat training objective ( i . e . , equally treats all output tokens ) and the hierarchical amr structure , which limits the model generalization .", "to bridge this gap , we propose a hierarchical curriculum learning ( hcl ) framework with structure - level ( sc ) and instance - level curricula ( ic ) .", "sc switches progressively from core to detail amr semantic elements while ic transits from structure - simple to - complex amr instances during training .", "through these two warming - up processes , hcl reduces the difficulty of learning complex structures , thus the flat model can better adapt to the amr hierarchy .", "extensive experiments on amr2 . 0 , amr3 . 
0 , structure - complex and out - of - distribution situations verify the effectiveness of hcl ."], "events": [{"event_type": "ITT", "arguments": [{"text": "abstract meaning representation ( amr ) parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["abstract", "meaning", "representation", "parsing"], "offsets": [0, 1, 2, 6]}], "trigger": {"text": "translate", "tokens": ["translate"], "offsets": [9]}}, {"event_type": "RWF", "arguments": [{"text": "gap", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["gap"], "offsets": [37]}, {"text": "flat training objective", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["flat", "training", "objective"], "offsets": [40, 41, 42]}, {"text": "hierarchical amr structure", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["hierarchical", "amr", "structure"], "offsets": [57, 58, 59]}], "trigger": {"text": "exists", "tokens": ["exists"], "offsets": [35]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [72]}, {"text": "bridge", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["bridge"], "offsets": [68]}, {"text": "hierarchical curriculum learning ( hcl ) framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hierarchical", "curriculum", "learning", "framework"], "offsets": [75, 76, 77, 81]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [73]}}, {"event_type": "PUR", "arguments": [{"text": "gap", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["gap"], "offsets": [70]}], "trigger": {"text": "bridge", "tokens": ["bridge"], "offsets": [68]}}, {"event_type": "MDS", "arguments": [{"text": "during training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "training"], "offsets": [120, 121]}, {"text": "sc", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["sc"], "offsets": [98]}, {"text": "from core to 
detail amr semantic elements", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "core", "to", "detail", "abstract", "meaning", "representation", "semantic", "elements"], "offsets": [101, 102, 103, 104, 0, 1, 2, 106, 107]}], "trigger": {"text": "switches", "tokens": ["switches"], "offsets": [99]}}, {"event_type": "MDS", "arguments": [{"text": "during training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "training"], "offsets": [120, 121]}, {"text": "ic", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["ic"], "offsets": [109]}, {"text": "from structure - simple to - complex amr instances", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "structure", "-", "simple", "-", "complex", "abstract", "meaning", "representation", "instances"], "offsets": [111, 112, 113, 114, 116, 117, 0, 1, 2, 119]}], "trigger": {"text": "transits", "tokens": ["transits"], "offsets": [110]}}, {"event_type": "MDS", "arguments": [{"text": "structure - level", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["structure", "-", "level"], "offsets": [83, 84, 85]}, {"text": "instance - level", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["instance", "-", "level"], "offsets": [90, 91, 92]}, {"text": "reduces", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reduces"], "offsets": [132]}], "trigger": {"text": "through", "tokens": ["through"], "offsets": [123]}}, {"event_type": "PUR", "arguments": [{"text": "difficulty of learning complex structures", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["difficulty", "of", "learning", "complex", "structures"], "offsets": [134, 135, 136, 137, 138]}], "trigger": {"text": "reduces", "tokens": ["reduces"], "offsets": [132]}}, {"event_type": "FAC", "arguments": [{"text": "effectiveness", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness"], "offsets": [175]}, {"text": "on amr2 . 
0 , amr3 . 0 , structure - complex and out - of - distribution situations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "amr2", ".", "0", ",", "amr3", ".", "0", ",", "structure", "-", "complex", "and", "out", "-", "of", "-", "distribution", "situations"], "offsets": [154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172]}], "trigger": {"text": "verify", "tokens": ["verify"], "offsets": [173]}}], "document": ["abstract", "meaning", "representation", "(", "amr", ")", "parsing", "aims", "to", "translate", "sentences", "to", "semantic", "representation", "with", "a", "hierarchical", "structure", ",", "and", "is", "recently", "empowered", "by", "pretrained", "sequence", "-", "to", "-", "sequence", "models", ".", "however", ",", "there", "exists", "a", "gap", "between", "their", "flat", "training", "objective", "(", "i", ".", "e", ".", ",", "equally", "treats", "all", "output", "tokens", ")", "and", "the", "hierarchical", "amr", "structure", ",", "which", "limits", "the", "model", "generalization", ".", "to", "bridge", "this", "gap", ",", "we", "propose", "a", "hierarchical", "curriculum", "learning", "(", "hcl", ")", "framework", "with", "structure", "-", "level", "(", "sc", ")", "and", "instance", "-", "level", "curricula", "(", "ic", ")", ".", "sc", "switches", "progressively", "from", "core", "to", "detail", "amr", "semantic", "elements", "while", "ic", "transits", "from", "structure", "-", "simple", "to", "-", "complex", "amr", "instances", "during", "training", ".", "through", "these", "two", "warming", "-", "up", "processes", ",", "hcl", "reduces", "the", "difficulty", "of", "learning", "complex", "structures", ",", "thus", "the", "flat", "model", "can", "better", "adapt", "to", "the", "amr", "hierarchy", ".", "extensive", "experiments", "on", "amr2", ".", "0", ",", "amr3", ".", "0", ",", "structure", "-", "complex", "and", "out", "-", "of", "-", "distribution", "situations", "verify", "the", 
"effectiveness", "of", "hcl", "."]}, {"venue": "ACL", "title": "Focus Attention: Promoting Faithfulness and Diversity in Summarization", "abstract": "Professional summaries are written with document-level information, such as the theme of the document, in mind. This is in contrast with most seq2seq decoders which simultaneously learn to focus on salient content, while deciding what to generate, at each decoding step. With the motivation to narrow this gap, we introduce Focus Attention Mechanism, a simple yet effective method to encourage decoders to proactively generate tokens that are similar or topical to the input document. Further, we propose a Focus Sampling method to enable generation of diverse summaries, an area currently understudied in summarization. When evaluated on the BBC extreme summarization task, two state-of-the-art models augmented with Focus Attention generate summaries that are closer to the target and more faithful to their input documents, outperforming their vanilla counterparts on ROUGE and multiple faithfulness measures. 
We also empirically demonstrate that Focus Sampling is more effective in generating diverse and faithful summaries than top-k or nucleus sampling-based decoding methods.", "doc_id": "a694d58d57a794b6811dbdd80418bc99", "publication_year": 2021, "sentences": ["professional summaries are written with document - level information , such as the theme of the document , in mind .", "this is in contrast with most seq2seq decoders which simultaneously learn to focus on salient content , while deciding what to generate , at each decoding step .", "with the motivation to narrow this gap , we introduce focus attention mechanism , a simple yet effective method to encourage decoders to proactively generate tokens that are similar or topical to the input document .", "further , we propose a focus sampling method to enable generation of diverse summaries , an area currently understudied in summarization .", "when evaluated on the bbc extreme summarization task , two state - of - the - art models augmented with focus attention generate summaries that are closer to the target and more faithful to their input documents , outperforming their vanilla counterparts on rouge and multiple faithfulness measures .", "we also empirically demonstrate that focus sampling is more effective in generating diverse and faithful summaries than top - k or nucleus sampling - based decoding methods ."], "events": [{"event_type": "ITT", "arguments": [{"text": "professional summaries", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["professional", "summaries"], "offsets": [0, 1]}], "trigger": {"text": "written", "tokens": ["written"], "offsets": [3]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [87]}, {"text": "focus sampling method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["focus", "sampling", "method"], "offsets": [90, 91, 92]}, {"text": "enable", "nugget_type": "E-PUR", 
"argument_type": "Target", "tokens": ["enable"], "offsets": [94]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [88]}}, {"event_type": "PUR", "arguments": [{"text": "generation of diverse summaries", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["generation", "of", "diverse", "summaries"], "offsets": [95, 96, 97, 98]}], "trigger": {"text": "enable", "tokens": ["enable"], "offsets": [94]}}, {"event_type": "FAC", "arguments": [{"text": "two state - of - the - art models augmented with focus attention", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["two", "state", "-", "of", "-", "the", "-", "art", "models", "augmented", "with", "focus", "attention"], "offsets": [116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128]}, {"text": "summaries that are closer to the target", "nugget_type": "STR", "argument_type": "Object", "tokens": ["summaries", "that", "are", "closer", "to", "the", "target"], "offsets": [130, 131, 132, 133, 134, 135, 136]}, {"text": "summaries that are more faithful to their input documents", "nugget_type": "STR", "argument_type": "Object", "tokens": ["summaries", "that", "are", "more", "faithful", "to", "their", "input", "documents"], "offsets": [130, 131, 132, 138, 139, 140, 141, 142, 143]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [129]}}, {"event_type": "CMP", "arguments": [{"text": "two state - of - the - art models augmented with focus attention", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["two", "state", "-", "of", "-", "the", "-", "art", "models", "augmented", "with", "focus", "attention"], "offsets": [116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128]}, {"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [145]}, {"text": "their vanilla counterparts", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["their", "vanilla", "counterparts"], "offsets": [146, 147, 148]}, 
{"text": "rouge measures", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["rouge", "measures"], "offsets": [150, 154]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [145]}}, {"event_type": "CMP", "arguments": [{"text": "focus sampling", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["focus", "sampling"], "offsets": [161, 162]}, {"text": "top - k or nucleus sampling - based decoding methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["top", "-", "k", "or", "nucleus", "sampling", "-", "based", "decoding", "methods"], "offsets": [173, 174, 175, 176, 177, 178, 179, 180, 181, 182]}, {"text": "more", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more"], "offsets": [164]}, {"text": "generating diverse", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["generating", "diverse"], "offsets": [167, 168]}, {"text": "generating faithful summaries", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["generating", "faithful", "summaries"], "offsets": [167, 170, 171]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [165]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [57]}, {"text": "focus attention mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["focus", "attention", "mechanism"], "offsets": [59, 60, 61]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [58]}}, {"event_type": "MDS", "arguments": [{"text": "tokens", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["tokens"], "offsets": [74]}, {"text": "that are similar or topical to the input document", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["that", "are", "similar", "or", "topical", "to", "the", "input", "document"], "offsets": [75, 76, 77, 78, 79, 80, 81, 82, 83]}], "trigger": {"text": "proactively generate", "tokens": 
["proactively", "generate"], "offsets": [72, 73]}}, {"event_type": "FIN", "arguments": [{"text": "effective", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["effective"], "offsets": [165]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [159]}}, {"event_type": "WKS", "arguments": [{"text": "two state - of - the - art models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [116, 117, 118, 119, 120, 121, 122, 123, 124]}, {"text": "on the bbc extreme summarization task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "bbc", "extreme", "summarization", "task"], "offsets": [109, 110, 111, 112, 113, 114]}], "trigger": {"text": "evaluated", "tokens": ["evaluated"], "offsets": [108]}}], "document": ["professional", "summaries", "are", "written", "with", "document", "-", "level", "information", ",", "such", "as", "the", "theme", "of", "the", "document", ",", "in", "mind", ".", "this", "is", "in", "contrast", "with", "most", "seq2seq", "decoders", "which", "simultaneously", "learn", "to", "focus", "on", "salient", "content", ",", "while", "deciding", "what", "to", "generate", ",", "at", "each", "decoding", "step", ".", "with", "the", "motivation", "to", "narrow", "this", "gap", ",", "we", "introduce", "focus", "attention", "mechanism", ",", "a", "simple", "yet", "effective", "method", "to", "encourage", "decoders", "to", "proactively", "generate", "tokens", "that", "are", "similar", "or", "topical", "to", "the", "input", "document", ".", "further", ",", "we", "propose", "a", "focus", "sampling", "method", "to", "enable", "generation", "of", "diverse", "summaries", ",", "an", "area", "currently", "understudied", "in", "summarization", ".", "when", "evaluated", "on", "the", "bbc", "extreme", "summarization", "task", ",", "two", "state", "-", "of", "-", "the", "-", "art", "models", "augmented", "with", "focus", 
"attention", "generate", "summaries", "that", "are", "closer", "to", "the", "target", "and", "more", "faithful", "to", "their", "input", "documents", ",", "outperforming", "their", "vanilla", "counterparts", "on", "rouge", "and", "multiple", "faithfulness", "measures", ".", "we", "also", "empirically", "demonstrate", "that", "focus", "sampling", "is", "more", "effective", "in", "generating", "diverse", "and", "faithful", "summaries", "than", "top", "-", "k", "or", "nucleus", "sampling", "-", "based", "decoding", "methods", "."]}, {"venue": "ACL", "title": "Efficient Constituency Parsing by Pointing", "abstract": "We propose a novel constituency parsing model that casts the parsing problem into a series of pointing tasks. Specifically, our model estimates the likelihood of a span being a legitimate tree constituent via the pointing score corresponding to the boundary words of the span. Our parsing model supports efficient top-down decoding and our learning objective is able to enforce structural consistency without resorting to the expensive CKY inference. The experiments on the standard English Penn Treebank parsing task show that our method achieves 92.78 F1 without using pre-trained models, which is higher than all the existing methods with similar time complexity. Using pre-trained BERT, our model achieves 95.48 F1, which is competitive with the state-of-the-art while being faster. 
Our approach also establishes new state-of-the-art in Basque and Swedish in the SPMRL shared tasks on multilingual constituency parsing.", "doc_id": "dfad4808dc1ea82c71491179e4b9181a", "publication_year": 2020, "sentences": ["we propose a novel constituency parsing model that casts the parsing problem into a series of pointing tasks .", "specifically , our model estimates the likelihood of a span being a legitimate tree constituent via the pointing score corresponding to the boundary words of the span .", "our parsing model supports efficient top - down decoding and our learning objective is able to enforce structural consistency without resorting to the expensive cky inference .", "the experiments on the standard english penn treebank parsing task show that our method achieves 92 . 78 f1 without using pre - trained models , which is higher than all the existing methods with similar time complexity .", "using pre - trained bert , our model achieves 95 . 48 f1 , which is competitive with the state - of - the - art while being faster .", "our approach also establishes new state - of - the - art in basque and swedish in the spmrl shared tasks on multilingual constituency parsing ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "constituency parsing model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["constituency", "parsing", "model"], "offsets": [4, 5, 6]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [1]}}, {"event_type": "MDS", "arguments": [{"text": "series of pointing tasks", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["series", "of", "pointing", "tasks"], "offsets": [14, 15, 16, 17]}, {"text": "parsing problem", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["parsing", "problem"], "offsets": [10, 11]}], "trigger": {"text": "casts", "tokens": ["casts"], "offsets": [8]}}, 
{"event_type": "MDS", "arguments": [{"text": "via the pointing score corresponding to the boundary words of the span", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "the", "pointing", "score", "corresponding", "to", "the", "boundary", "words", "of", "the", "span"], "offsets": [34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45]}, {"text": "span", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["span"], "offsets": [28]}], "trigger": {"text": "estimates", "tokens": ["estimates"], "offsets": [23]}}, {"event_type": "FAC", "arguments": [{"text": "parsing model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["parsing", "model"], "offsets": [48, 49]}, {"text": "top - down decoding", "nugget_type": "APP", "argument_type": "Object", "tokens": ["top", "-", "down", "decoding"], "offsets": [52, 53, 54, 55]}], "trigger": {"text": "supports", "tokens": ["supports"], "offsets": [50]}}, {"event_type": "FAC", "arguments": [{"text": "learning objective", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["learning", "objective"], "offsets": [58, 59]}, {"text": "structural consistency", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["structural", "consistency"], "offsets": [64, 65]}, {"text": "without resorting to the expensive cky inference", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "resorting", "to", "the", "expensive", "cky", "inference"], "offsets": [66, 67, 68, 69, 70, 71, 72]}], "trigger": {"text": "enforce", "tokens": ["enforce"], "offsets": [63]}}, {"event_type": "CMP", "arguments": [{"text": "on the standard english penn treebank", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "standard", "english", "penn", "treebank"], "offsets": [76, 77, 78, 79, 80, 81]}, {"text": "92 . 
78", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["92", ".", "78"], "offsets": [89, 90, 91]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1"], "offsets": [92]}, {"text": "without using pre - trained models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "using", "pre", "-", "trained", "models"], "offsets": [93, 94, 95, 96, 97, 98]}, {"text": "higher", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher"], "offsets": [102]}, {"text": "all the existing methods with similar time complexity", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["all", "the", "existing", "methods", "with", "similar", "time", "complexity"], "offsets": [104, 105, 106, 107, 108, 109, 110, 111]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [88]}}, {"event_type": "CMP", "arguments": [{"text": "using pre - trained bert", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "pre", "-", "trained", "bert"], "offsets": [113, 114, 115, 116, 117]}, {"text": "95 . 
48", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["95", ".", "48"], "offsets": [122, 123, 124]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1"], "offsets": [125]}, {"text": "competitive", "nugget_type": "STR", "argument_type": "Result", "tokens": ["competitive"], "offsets": [129]}, {"text": "state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [132, 133, 134, 135, 136, 137, 138]}, {"text": "while being faster", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "being", "faster"], "offsets": [139, 140, 141]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art", "nugget_type": "APP", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [148, 149, 150, 151, 152, 153, 154]}, {"text": "multilingual constituency parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multilingual", "constituency", "parsing"], "offsets": [165, 166, 167]}, {"text": "in the spmrl shared tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "spmrl", "shared", "tasks"], "offsets": [159, 160, 161, 162, 163]}], "trigger": {"text": "establishes", "tokens": ["establishes"], "offsets": [146]}}], "document": ["we", "propose", "a", "novel", "constituency", "parsing", "model", "that", "casts", "the", "parsing", "problem", "into", "a", "series", "of", "pointing", "tasks", ".", "specifically", ",", "our", "model", "estimates", "the", "likelihood", "of", "a", "span", "being", "a", "legitimate", "tree", "constituent", "via", "the", "pointing", "score", "corresponding", "to", "the", "boundary", "words", "of", "the", "span", ".", "our", "parsing", "model", "supports", "efficient", "top", "-", "down", "decoding", "and", "our", "learning", "objective", "is", "able", "to", 
"enforce", "structural", "consistency", "without", "resorting", "to", "the", "expensive", "cky", "inference", ".", "the", "experiments", "on", "the", "standard", "english", "penn", "treebank", "parsing", "task", "show", "that", "our", "method", "achieves", "92", ".", "78", "f1", "without", "using", "pre", "-", "trained", "models", ",", "which", "is", "higher", "than", "all", "the", "existing", "methods", "with", "similar", "time", "complexity", ".", "using", "pre", "-", "trained", "bert", ",", "our", "model", "achieves", "95", ".", "48", "f1", ",", "which", "is", "competitive", "with", "the", "state", "-", "of", "-", "the", "-", "art", "while", "being", "faster", ".", "our", "approach", "also", "establishes", "new", "state", "-", "of", "-", "the", "-", "art", "in", "basque", "and", "swedish", "in", "the", "spmrl", "shared", "tasks", "on", "multilingual", "constituency", "parsing", "."]}, {"venue": "ACL", "title": "Implicit Discourse Relation Classification: We Need to Talk about Evaluation", "abstract": "Implicit relation classification on Penn Discourse TreeBank (PDTB) 2.0 is a common benchmark task for evaluating the understanding of discourse relations. However, the lack of consistency in preprocessing and evaluation poses challenges to fair comparison of results in the literature. In this work, we highlight these inconsistencies and propose an improved evaluation protocol. Paired with this protocol, we report strong baseline results from pretrained sentence encoders, which set the new state-of-the-art for PDTB 2.0. Furthermore, this work is the first to explore fine-grained relation classification on PDTB 3.0. We expect our work to serve as a point of comparison for future work, and also as an initiative to discuss models of larger context and possible data augmentations for downstream transferability.", "doc_id": "dc222f7a8347fb5208d8166366ab7bdd", "publication_year": 2020, "sentences": ["implicit relation classification on penn discourse treebank ( pdtb ) 2 . 
0 is a common benchmark task for evaluating the understanding of discourse relations .", "however , the lack of consistency in preprocessing and evaluation poses challenges to fair comparison of results in the literature .", "in this work , we highlight these inconsistencies and propose an improved evaluation protocol .", "paired with this protocol , we report strong baseline results from pretrained sentence encoders , which set the new state - of - the - art for pdtb 2 . 0 .", "furthermore , this work is the first to explore fine - grained relation classification on pdtb 3 . 0 .", "we expect our work to serve as a point of comparison for future work , and also as an initiative to discuss models of larger context and possible data augmentations for downstream transferability ."], "events": [{"event_type": "ITT", "arguments": [{"text": "understanding of discourse relations", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["understanding", "of", "discourse", "relations"], "offsets": [21, 22, 23, 24]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [17]}}, {"event_type": "RWF", "arguments": [{"text": "preprocessing and evaluation", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["preprocessing", "and", "evaluation"], "offsets": [33, 34, 35]}], "trigger": {"text": "lack of consistency", "tokens": ["lack", "of", "consistency"], "offsets": [29, 30, 31]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [51]}, {"text": "inconsistencies", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["inconsistencies"], "offsets": [54]}], "trigger": {"text": "highlight", "tokens": ["highlight"], "offsets": [52]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [51]}, {"text": "improved evaluation protocol", "nugget_type": "APP", "argument_type": "Content", "tokens": 
["improved", "evaluation", "protocol"], "offsets": [58, 59, 60]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [56]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [67]}, {"text": "strong baseline results", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["strong", "baseline", "results"], "offsets": [69, 70, 71]}, {"text": "from pretrained sentence encoders", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "pretrained", "sentence", "encoders"], "offsets": [72, 73, 74, 75]}, {"text": "set", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["set"], "offsets": [78]}], "trigger": {"text": "report", "tokens": ["report"], "offsets": [68]}}, {"event_type": "PUR", "arguments": [{"text": "new state - of - the - art for pdtb 2 . 0", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["new", "state", "-", "of", "-", "the", "-", "art", "for", "pdtb", "2", ".", "0"], "offsets": [80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92]}], "trigger": {"text": "set", "tokens": ["set"], "offsets": [78]}}, {"event_type": "WKS", "arguments": [{"text": "fine - grained relation classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["fine", "-", "grained", "relation", "classification"], "offsets": [103, 104, 105, 106, 107]}, {"text": "on pdtb 3 . 
0", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "pdtb", "3", ".", "0"], "offsets": [108, 109, 110, 111, 112]}], "trigger": {"text": "first to explore", "tokens": ["first", "to", "explore"], "offsets": [100, 101, 102]}}], "document": ["implicit", "relation", "classification", "on", "penn", "discourse", "treebank", "(", "pdtb", ")", "2", ".", "0", "is", "a", "common", "benchmark", "task", "for", "evaluating", "the", "understanding", "of", "discourse", "relations", ".", "however", ",", "the", "lack", "of", "consistency", "in", "preprocessing", "and", "evaluation", "poses", "challenges", "to", "fair", "comparison", "of", "results", "in", "the", "literature", ".", "in", "this", "work", ",", "we", "highlight", "these", "inconsistencies", "and", "propose", "an", "improved", "evaluation", "protocol", ".", "paired", "with", "this", "protocol", ",", "we", "report", "strong", "baseline", "results", "from", "pretrained", "sentence", "encoders", ",", "which", "set", "the", "new", "state", "-", "of", "-", "the", "-", "art", "for", "pdtb", "2", ".", "0", ".", "furthermore", ",", "this", "work", "is", "the", "first", "to", "explore", "fine", "-", "grained", "relation", "classification", "on", "pdtb", "3", ".", "0", ".", "we", "expect", "our", "work", "to", "serve", "as", "a", "point", "of", "comparison", "for", "future", "work", ",", "and", "also", "as", "an", "initiative", "to", "discuss", "models", "of", "larger", "context", "and", "possible", "data", "augmentations", "for", "downstream", "transferability", "."]}, {"venue": "ACL", "title": "TIMEDIAL: Temporal Commonsense Reasoning in Dialog", "abstract": "Everyday conversations require understanding everyday events, which in turn, requires understanding temporal commonsense concepts interwoven with those events. Despite recent progress with massive pre-trained language models (LMs) such as T5 and GPT-3, their capability of temporal reasoning in dialogs remains largely under-explored. 
In this paper, we present the first study to investigate pre-trained LMs for their temporal reasoning capabilities in dialogs by introducing a new task and a crowd-sourced English challenge set, TimeDial. We formulate TimeDial as a multiple choice cloze task with over 1.1K carefully curated dialogs. Empirical results demonstrate that even the best performing models struggle on this task compared to humans, with 23 absolute points of gap in accuracy. Furthermore, our analysis reveals that the models fail to reason about dialog context correctly; instead, they rely on shallow cues based on existing temporal patterns in context, motivating future research for modeling temporal concepts in text and robust contextual reasoning about them. The dataset is publicly available at https://github.com/google-research-datasets/timedial.", "doc_id": "751275eaf861b43c93f9a46e22cae935", "publication_year": 2021, "sentences": ["everyday conversations require understanding everyday events , which in turn , requires understanding temporal commonsense concepts interwoven with those events .", "despite recent progress with massive pre - trained language models ( lms ) such as t5 and gpt - 3 , their capability of temporal reasoning in dialogs remains largely under - explored .", "in this paper , we present the first study to investigate pre - trained lms for their temporal reasoning capabilities in dialogs by introducing a new task and a crowd - sourced english challenge set , timedial .", "we formulate timedial as a multiple choice cloze task with over 1 . 
1k carefully curated dialogs .", "empirical results demonstrate that even the best performing models struggle on this task compared to humans , with 23 absolute points of gap in accuracy .", "furthermore , our analysis reveals that the models fail to reason about dialog context correctly ; instead , they rely on shallow cues based on existing temporal patterns in context , motivating future research for modeling temporal concepts in text and robust contextual reasoning about them .", "the dataset is publicly available at https : / / github . com / google - research - datasets / timedial ."], "events": [{"event_type": "ITT", "arguments": [{"text": "massive pre - trained language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["massive", "pre", "-", "trained", "language", "models"], "offsets": [25, 26, 27, 28, 29, 30]}], "trigger": {"text": "progress", "tokens": ["progress"], "offsets": [23]}}, {"event_type": "RWF", "arguments": [{"text": "capability of temporal reasoning", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["capability", "of", "temporal", "reasoning"], "offsets": [43, 44, 45, 46]}, {"text": "under - explored", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["under", "-", "explored"], "offsets": [51, 52, 53]}, {"text": "largely", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["largely"], "offsets": [50]}], "trigger": {"text": "remains", "tokens": ["remains"], "offsets": [49]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [59]}, {"text": "new task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["new", "task"], "offsets": [80, 81]}, {"text": "crowd - sourced english challenge set", "nugget_type": "APP", "argument_type": "Content", "tokens": ["crowd", "-", "sourced", "english", "challenge", "set"], "offsets": [84, 85, 86, 87, 88, 89]}], "trigger": {"text": "introducing", "tokens": ["introducing"], 
"offsets": [78]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [59]}, {"text": "temporal reasoning capabilities", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["temporal", "reasoning", "capabilities"], "offsets": [72, 73, 74]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [65]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [93]}, {"text": "timedial", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["timedial"], "offsets": [95]}, {"text": "multiple choice cloze task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multiple", "choice", "cloze", "task"], "offsets": [98, 99, 100, 101]}, {"text": "1 . 1k carefully curated dialogs", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["1", ".", "1k", "carefully", "curated", "dialogs"], "offsets": [104, 105, 106, 107, 108, 109]}], "trigger": {"text": "formulate", "tokens": ["formulate"], "offsets": [94]}}, {"event_type": "CMP", "arguments": [{"text": "best performing models", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["best", "performing", "models"], "offsets": [117, 118, 119]}, {"text": "humans", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["humans"], "offsets": [126]}, {"text": "23 absolute points", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["23", "absolute", "points"], "offsets": [129, 130, 131]}, {"text": "gap in accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["gap", "in", "accuracy"], "offsets": [133, 134, 135]}], "trigger": {"text": "struggle on", "tokens": ["struggle", "on"], "offsets": [120, 121]}}, {"event_type": "FAC", "arguments": [{"text": "dialog context", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["dialog", "context"], "offsets": [149, 150]}], "trigger": {"text": "fail to 
reason", "tokens": ["fail", "to", "reason"], "offsets": [145, 146, 147]}}, {"event_type": "FAC", "arguments": [{"text": "future research", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["future", "research"], "offsets": [169, 170]}, {"text": "modeling", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["modeling"], "offsets": [172]}], "trigger": {"text": "motivating", "tokens": ["motivating"], "offsets": [168]}}, {"event_type": "PUR", "arguments": [{"text": "temporal concepts", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["temporal", "concepts"], "offsets": [173, 174]}], "trigger": {"text": "modeling", "tokens": ["modeling"], "offsets": [172]}}], "document": ["everyday", "conversations", "require", "understanding", "everyday", "events", ",", "which", "in", "turn", ",", "requires", "understanding", "temporal", "commonsense", "concepts", "interwoven", "with", "those", "events", ".", "despite", "recent", "progress", "with", "massive", "pre", "-", "trained", "language", "models", "(", "lms", ")", "such", "as", "t5", "and", "gpt", "-", "3", ",", "their", "capability", "of", "temporal", "reasoning", "in", "dialogs", "remains", "largely", "under", "-", "explored", ".", "in", "this", "paper", ",", "we", "present", "the", "first", "study", "to", "investigate", "pre", "-", "trained", "lms", "for", "their", "temporal", "reasoning", "capabilities", "in", "dialogs", "by", "introducing", "a", "new", "task", "and", "a", "crowd", "-", "sourced", "english", "challenge", "set", ",", "timedial", ".", "we", "formulate", "timedial", "as", "a", "multiple", "choice", "cloze", "task", "with", "over", "1", ".", "1k", "carefully", "curated", "dialogs", ".", "empirical", "results", "demonstrate", "that", "even", "the", "best", "performing", "models", "struggle", "on", "this", "task", "compared", "to", "humans", ",", "with", "23", "absolute", "points", "of", "gap", "in", "accuracy", ".", "furthermore", ",", "our", "analysis", "reveals", "that", "the", 
"models", "fail", "to", "reason", "about", "dialog", "context", "correctly", ";", "instead", ",", "they", "rely", "on", "shallow", "cues", "based", "on", "existing", "temporal", "patterns", "in", "context", ",", "motivating", "future", "research", "for", "modeling", "temporal", "concepts", "in", "text", "and", "robust", "contextual", "reasoning", "about", "them", ".", "the", "dataset", "is", "publicly", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "google", "-", "research", "-", "datasets", "/", "timedial", "."]}, {"venue": "ACL", "title": "On the Intrinsic and Extrinsic Fairness Evaluation Metrics for Contextualized Language Representations", "abstract": "Multiple metrics have been introduced to measure fairness in various natural language processing tasks. These metrics can be roughly categorized into two categories: 1) extrinsic metrics for evaluating fairness in downstream applications and 2) intrinsic metrics for estimating fairness in upstream contextualized language representation models. In this paper, we conduct an extensive correlation study between intrinsic and extrinsic metrics across bias notions using 19 contextualized language models. 
We find that intrinsic and extrinsic metrics do not necessarily correlate in their original setting, even when correcting for metric misalignments, noise in evaluation datasets, and confounding factors such as experiment configuration for extrinsic metrics.", "doc_id": "a705e0c134eadea7475803299211da7c", "publication_year": 2022, "sentences": ["multiple metrics have been introduced to measure fairness in various natural language processing tasks .", "these metrics can be roughly categorized into two categories : 1 ) extrinsic metrics for evaluating fairness in downstream applications and 2 ) intrinsic metrics for estimating fairness in upstream contextualized language representation models .", "in this paper , we conduct an extensive correlation study between intrinsic and extrinsic metrics across bias notions using 19 contextualized language models .", "we find that intrinsic and extrinsic metrics do not necessarily correlate in their original setting , even when correcting for metric misalignments , noise in evaluation datasets , and confounding factors such as experiment configuration for extrinsic metrics ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language processing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "processing"], "offsets": [10, 11, 12]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [6]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [54]}, {"text": "19 contextualized language models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["19", "contextualized", "language", "models"], "offsets": [69, 70, 71, 72]}, {"text": "conduct", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["conduct"], "offsets": [55]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [68]}}, {"event_type": "PUR", "arguments": [{"text": "extensive correlation study", 
"nugget_type": "TAK", "argument_type": "Aim", "tokens": ["extensive", "correlation", "study"], "offsets": [57, 58, 59]}, {"text": "between intrinsic and extrinsic metrics across bias notions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "intrinsic", "and", "extrinsic", "metrics", "across", "bias", "notions"], "offsets": [60, 61, 62, 63, 64, 65, 66, 67]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [55]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [74]}, {"text": "not necessarily correlate", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["not", "necessarily", "correlate"], "offsets": [82, 83, 84]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [75]}}, {"event_type": "FAC", "arguments": [{"text": "original setting", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["original", "setting"], "offsets": [87, 88]}, {"text": "when correcting for metric misalignments , noise in evaluation datasets , and confounding factors such as experiment configuration for extrinsic metrics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "correcting", "for", "metric", "misalignments", ",", "noise", "in", "evaluation", "datasets", ",", "and", "confounding", "factors", "such", "as", "experiment", "configuration", "for", "extrinsic", "metrics"], "offsets": [91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111]}, {"text": "intrinsic metrics", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["intrinsic", "metrics"], "offsets": [77, 80]}, {"text": "extrinsic metrics", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["extrinsic", "metrics"], "offsets": [79, 80]}], "trigger": {"text": "not necessarily correlate", "tokens": ["not", "necessarily", "correlate"], "offsets": [82, 83, 84]}}], "document": ["multiple", "metrics", "have", 
"been", "introduced", "to", "measure", "fairness", "in", "various", "natural", "language", "processing", "tasks", ".", "these", "metrics", "can", "be", "roughly", "categorized", "into", "two", "categories", ":", "1", ")", "extrinsic", "metrics", "for", "evaluating", "fairness", "in", "downstream", "applications", "and", "2", ")", "intrinsic", "metrics", "for", "estimating", "fairness", "in", "upstream", "contextualized", "language", "representation", "models", ".", "in", "this", "paper", ",", "we", "conduct", "an", "extensive", "correlation", "study", "between", "intrinsic", "and", "extrinsic", "metrics", "across", "bias", "notions", "using", "19", "contextualized", "language", "models", ".", "we", "find", "that", "intrinsic", "and", "extrinsic", "metrics", "do", "not", "necessarily", "correlate", "in", "their", "original", "setting", ",", "even", "when", "correcting", "for", "metric", "misalignments", ",", "noise", "in", "evaluation", "datasets", ",", "and", "confounding", "factors", "such", "as", "experiment", "configuration", "for", "extrinsic", "metrics", "."]}, {"venue": "ACL", "title": "Adversarial Multitask Learning for Joint Multi-Feature and Multi-Dialect Morphological Modeling", "abstract": "Morphological tagging is challenging for morphologically rich languages due to the large target space and the need for more training data to minimize model sparsity. Dialectal variants of morphologically rich languages suffer more as they tend to be more noisy and have less resources. In this paper we explore the use of multitask learning and adversarial training to address morphological richness and dialectal variations in the context of full morphological tagging. We use multitask learning for joint morphological modeling for the features within two dialects, and as a knowledge-transfer scheme for cross-dialectal modeling. 
We use adversarial training to learn dialect invariant features that can help the knowledge-transfer scheme from the high to low-resource variants. We work with two dialectal variants: Modern Standard Arabic (high-resource \u201cdialect\u2019\u201d) and Egyptian Arabic (low-resource dialect) as a case study. Our models achieve state-of-the-art results for both. Furthermore, adversarial training provides more significant improvement when using smaller training datasets in particular.", "doc_id": "e7815e553e49295a238856f2f27022ec", "publication_year": 2019, "sentences": ["morphological tagging is challenging for morphologically rich languages due to the large target space and the need for more training data to minimize model sparsity .", "dialectal variants of morphologically rich languages suffer more as they tend to be more noisy and have less resources .", "in this paper we explore the use of multitask learning and adversarial training to address morphological richness and dialectal variations in the context of full morphological tagging .", "we use multitask learning for joint morphological modeling for the features within two dialects , and as a knowledge - transfer scheme for cross - dialectal modeling .", "we use adversarial training to learn dialect invariant features that can help the knowledge - transfer scheme from the high to low - resource variants .", "we work with two dialectal variants : modern standard arabic ( high - resource \u201c dialect \u2019 \u201d ) and egyptian arabic ( low - resource dialect ) as a case study .", "our models achieve state - of - the - art results for both .", "furthermore , adversarial training provides more significant improvement when using smaller training datasets in particular ."], "events": [{"event_type": "RWF", "arguments": [{"text": "challenging", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["challenging"], "offsets": [3]}, {"text": "morphologically rich languages", "nugget_type": "TAK", 
"argument_type": "Target", "tokens": ["morphologically", "rich", "languages"], "offsets": [5, 6, 7]}, {"text": "morphological tagging", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["morphological", "tagging"], "offsets": [0, 1]}, {"text": "large target space", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["large", "target", "space"], "offsets": [11, 12, 13]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [3]}}, {"event_type": "RWF", "arguments": [{"text": "suffer more", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["suffer", "more"], "offsets": [32, 33]}], "trigger": {"text": "suffer more", "tokens": ["suffer", "more"], "offsets": [32, 33]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [49]}, {"text": "multitask learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multitask", "learning"], "offsets": [54, 55]}, {"text": "adversarial training", "nugget_type": "APP", "argument_type": "Content", "tokens": ["adversarial", "training"], "offsets": [57, 58]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [60]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [52]}}, {"event_type": "PUR", "arguments": [{"text": "in the context of full morphological tagging", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "context", "of", "full", "morphological", "tagging"], "offsets": [66, 67, 68, 69, 70, 71, 72]}, {"text": "morphological richness", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["morphological", "richness"], "offsets": [61, 62]}, {"text": "dialectal variations", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["dialectal", "variations"], "offsets": [64, 65]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [60]}}, {"event_type": "MDS", "arguments": [{"text": 
"multitask learning", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["multitask", "learning"], "offsets": [76, 77]}, {"text": "features within two dialects", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["features", "within", "two", "dialects"], "offsets": [84, 85, 86, 87]}, {"text": "joint morphological modeling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["joint", "morphological", "modeling"], "offsets": [79, 80, 81]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [75]}}, {"event_type": "MDS", "arguments": [{"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [107]}, {"text": "adversarial training", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["adversarial", "training"], "offsets": [104, 105]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [103]}}, {"event_type": "PUR", "arguments": [{"text": "dialect invariant features", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["dialect", "invariant", "features"], "offsets": [108, 109, 110]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [107]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [128]}, {"text": "modern standard arabic", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["modern", "standard", "arabic"], "offsets": [135, 136, 137]}, {"text": "egyptian arabic", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["egyptian", "arabic"], "offsets": [148, 149]}], "trigger": {"text": "work with", "tokens": ["work", "with"], "offsets": [129, 130]}}, {"event_type": "FAC", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["models"], "offsets": [162]}, {"text": "state - of - the - art results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", 
"results"], "offsets": [164, 165, 166, 167, 168, 169, 170, 171]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [163]}}, {"event_type": "FAC", "arguments": [{"text": "adversarial training", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["adversarial", "training"], "offsets": [177, 178]}, {"text": "more significant improvement", "nugget_type": "STR", "argument_type": "Object", "tokens": ["more", "significant", "improvement"], "offsets": [180, 181, 182]}, {"text": "when using smaller training datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "using", "smaller", "training", "datasets"], "offsets": [183, 184, 185, 186, 187]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [179]}}, {"event_type": "RWF", "arguments": [{"text": "dialectal variants of morphologically rich languages", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["dialectal", "variants", "of", "morphologically", "rich", "languages"], "offsets": [26, 27, 28, 29, 30, 31]}], "trigger": {"text": "more noisy", "tokens": ["more", "noisy"], "offsets": [39, 40]}}, {"event_type": "RWF", "arguments": [{"text": "dialectal variants of morphologically rich languages", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["dialectal", "variants", "of", "morphologically", "rich", "languages"], "offsets": [26, 27, 28, 29, 30, 31]}, {"text": "less resources", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["less", "resources"], "offsets": [43, 44]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [42]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [74]}, {"text": "multitask learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multitask", "learning"], "offsets": [76, 77]}, {"text": "as a knowledge - transfer scheme", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "a", 
"knowledge", "-", "transfer", "scheme"], "offsets": [90, 91, 92, 93, 94, 95]}, {"text": "cross - dialectal modeling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["cross", "-", "dialectal", "modeling"], "offsets": [97, 98, 99, 100]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [75]}}], "document": ["morphological", "tagging", "is", "challenging", "for", "morphologically", "rich", "languages", "due", "to", "the", "large", "target", "space", "and", "the", "need", "for", "more", "training", "data", "to", "minimize", "model", "sparsity", ".", "dialectal", "variants", "of", "morphologically", "rich", "languages", "suffer", "more", "as", "they", "tend", "to", "be", "more", "noisy", "and", "have", "less", "resources", ".", "in", "this", "paper", "we", "explore", "the", "use", "of", "multitask", "learning", "and", "adversarial", "training", "to", "address", "morphological", "richness", "and", "dialectal", "variations", "in", "the", "context", "of", "full", "morphological", "tagging", ".", "we", "use", "multitask", "learning", "for", "joint", "morphological", "modeling", "for", "the", "features", "within", "two", "dialects", ",", "and", "as", "a", "knowledge", "-", "transfer", "scheme", "for", "cross", "-", "dialectal", "modeling", ".", "we", "use", "adversarial", "training", "to", "learn", "dialect", "invariant", "features", "that", "can", "help", "the", "knowledge", "-", "transfer", "scheme", "from", "the", "high", "to", "low", "-", "resource", "variants", ".", "we", "work", "with", "two", "dialectal", "variants", ":", "modern", "standard", "arabic", "(", "high", "-", "resource", "\u201c", "dialect", "\u2019", "\u201d", ")", "and", "egyptian", "arabic", "(", "low", "-", "resource", "dialect", ")", "as", "a", "case", "study", ".", "our", "models", "achieve", "state", "-", "of", "-", "the", "-", "art", "results", "for", "both", ".", "furthermore", ",", "adversarial", "training", "provides", "more", "significant", "improvement", "when", "using", 
"smaller", "training", "datasets", "in", "particular", "."]}, {"venue": "ACL", "title": "Attention Calibration for Transformer in Neural Machine Translation", "abstract": "Attention mechanisms have achieved substantial improvements in neural machine translation by dynamically selecting relevant inputs for different predictions. However, recent studies have questioned the attention mechanisms\u2019 capability for discovering decisive inputs. In this paper, we propose to calibrate the attention weights by introducing a mask perturbation model that automatically evaluates each input\u2019s contribution to the model outputs. We increase the attention weights assigned to the indispensable tokens, whose removal leads to a dramatic performance decrease. The extensive experiments on the Transformer-based translation have demonstrated the effectiveness of our model. We further find that the calibrated attention weights are more uniform at lower layers to collect multiple information while more concentrated on the specific inputs at higher layers. 
Detailed analyses also show a great need for calibration in the attention weights with high entropy where the model is unconfident about its decision.", "doc_id": "2bf14ac17fe5864c9d48551db9f081ee", "publication_year": 2021, "sentences": ["attention mechanisms have achieved substantial improvements in neural machine translation by dynamically selecting relevant inputs for different predictions .", "however , recent studies have questioned the attention mechanisms \u2019 capability for discovering decisive inputs .", "in this paper , we propose to calibrate the attention weights by introducing a mask perturbation model that automatically evaluates each input \u2019 s contribution to the model outputs .", "we increase the attention weights assigned to the indispensable tokens , whose removal leads to a dramatic performance decrease .", "the extensive experiments on the transformer - based translation have demonstrated the effectiveness of our model .", "we further find that the calibrated attention weights are more uniform at lower layers to collect multiple information while more concentrated on the specific inputs at higher layers .", "detailed analyses also show a great need for calibration in the attention weights with high entropy where the model is unconfident about its decision ."], "events": [{"event_type": "RWS", "arguments": [{"text": "attention mechanisms", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["attention", "mechanisms"], "offsets": [0, 1]}, {"text": "relevant inputs", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["relevant", "inputs"], "offsets": [13, 14]}, {"text": "different predictions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["different", "predictions"], "offsets": [16, 17]}], "trigger": {"text": "selecting", "tokens": ["selecting"], "offsets": [12]}}, {"event_type": "ITT", "arguments": [{"text": "attention mechanisms", "nugget_type": "APP", "argument_type": "Target", "tokens": 
["attention", "mechanisms"], "offsets": [0, 1]}, {"text": "in neural machine translation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "neural", "machine", "translation"], "offsets": [6, 7, 8, 9]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [3]}}, {"event_type": "RWF", "arguments": [{"text": "questioned", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["questioned"], "offsets": [24]}, {"text": "attention mechanisms", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["attention", "mechanisms"], "offsets": [26, 27]}], "trigger": {"text": "questioned", "tokens": ["questioned"], "offsets": [24]}}, {"event_type": "PUR", "arguments": [{"text": "decisive inputs", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["decisive", "inputs"], "offsets": [32, 33]}], "trigger": {"text": "discovering", "tokens": ["discovering"], "offsets": [31]}}, {"event_type": "PRP", "arguments": [{"text": "mask perturbation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["mask", "perturbation", "model"], "offsets": [49, 50, 51]}, {"text": "calibrate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["calibrate"], "offsets": [42]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [47]}}, {"event_type": "PUR", "arguments": [{"text": "attention weights", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["attention", "weights"], "offsets": [44, 45]}], "trigger": {"text": "calibrate", "tokens": ["calibrate"], "offsets": [42]}}, {"event_type": "MDS", "arguments": [{"text": "each input \u2019 s contribution", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["each", "input", "\u2019", "s", "contribution"], "offsets": [55, 56, 57, 58, 59]}, {"text": "model outputs", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["model", "outputs"], "offsets": [62, 63]}], "trigger": {"text": "evaluates", "tokens": ["evaluates"], "offsets": 
[54]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [65]}, {"text": "indispensable tokens", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["indispensable", "tokens"], "offsets": [73, 74]}, {"text": "attention weights", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["attention", "weights"], "offsets": [68, 69]}], "trigger": {"text": "increase", "tokens": ["increase"], "offsets": [66]}}, {"event_type": "FAC", "arguments": [{"text": "dramatic performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["dramatic", "performance"], "offsets": [81, 82]}, {"text": "decrease", "nugget_type": "WEA", "argument_type": "Target", "tokens": ["decrease"], "offsets": [83]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [78]}}, {"event_type": "FAC", "arguments": [{"text": "on the transformer - based translation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "transformer", "-", "based", "translation"], "offsets": [88, 89, 90, 91, 92, 93]}, {"text": "effectiveness of our model", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["effectiveness", "of", "mask", "perturbation", "model"], "offsets": [97, 98, 49, 50, 51]}], "trigger": {"text": "demonstrated", "tokens": ["demonstrated"], "offsets": [95]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [102]}, {"text": "more uniform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "uniform"], "offsets": [111, 112]}, {"text": "more concentrated", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "concentrated"], "offsets": [121, 122]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [104]}}, {"event_type": "CMP", "arguments": [{"text": "lower layers", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["lower", 
"layers"], "offsets": [114, 115]}], "trigger": {"text": "more uniform", "tokens": ["more", "uniform"], "offsets": [111, 112]}}, {"event_type": "CMP", "arguments": [{"text": "more concentrated", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "concentrated"], "offsets": [121, 122]}], "trigger": {"text": "more concentrated", "tokens": ["more", "concentrated"], "offsets": [121, 122]}}, {"event_type": "FIN", "arguments": [{"text": "calibration", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["calibration"], "offsets": [139]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "attention weights with high entropy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["attention", "weights", "with", "high", "entropy"], "offsets": [142, 143, 144, 145, 146]}, {"text": "where the model is unconfident about its decision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["where", "the", "model", "is", "unconfident", "about", "its", "decision"], "offsets": [147, 148, 149, 150, 151, 152, 153, 154]}], "trigger": {"text": "calibration", "tokens": ["calibration"], "offsets": [139]}}], "document": ["attention", "mechanisms", "have", "achieved", "substantial", "improvements", "in", "neural", "machine", "translation", "by", "dynamically", "selecting", "relevant", "inputs", "for", "different", "predictions", ".", "however", ",", "recent", "studies", "have", "questioned", "the", "attention", "mechanisms", "\u2019", "capability", "for", "discovering", "decisive", "inputs", ".", "in", "this", "paper", ",", "we", "propose", "to", "calibrate", "the", "attention", "weights", "by", "introducing", "a", "mask", "perturbation", "model", "that", "automatically", "evaluates", "each", "input", "\u2019", "s", "contribution", "to", "the", "model", "outputs", ".", "we", "increase", "the", "attention", "weights", "assigned", "to", "the", "indispensable", "tokens", ",", "whose", 
"removal", "leads", "to", "a", "dramatic", "performance", "decrease", ".", "the", "extensive", "experiments", "on", "the", "transformer", "-", "based", "translation", "have", "demonstrated", "the", "effectiveness", "of", "our", "model", ".", "we", "further", "find", "that", "the", "calibrated", "attention", "weights", "are", "more", "uniform", "at", "lower", "layers", "to", "collect", "multiple", "information", "while", "more", "concentrated", "on", "the", "specific", "inputs", "at", "higher", "layers", ".", "detailed", "analyses", "also", "show", "a", "great", "need", "for", "calibration", "in", "the", "attention", "weights", "with", "high", "entropy", "where", "the", "model", "is", "unconfident", "about", "its", "decision", "."]}, {"venue": "ACL", "title": "Implicit Representations of Meaning in Neural Language Models", "abstract": "Does the effectiveness of neural language models derive entirely from accurate modeling of surface word co-occurrence statistics, or do these models represent and reason about the world they describe? In BART and T5 transformer language models, we identify contextual word representations that function as *models of entities and situations* as they evolve throughout a discourse. These neural representations have functional similarities to linguistic models of dynamic semantics: they support a linear readout of each entity\u2019s current properties and relations, and can be manipulated with predictable effects on language generation. 
Our results indicate that prediction in pretrained neural language models is supported, at least in part, by dynamic representations of meaning and implicit simulation of entity state, and that this behavior can be learned with only text as training data.", "doc_id": "85df6e049bbb03b0c9939c9e7eb865ec", "publication_year": 2021, "sentences": ["does the effectiveness of neural language models derive entirely from accurate modeling of surface word co - occurrence statistics , or do these models represent and reason about the world they describe ?", "in bart and t5 transformer language models , we identify contextual word representations that function as * models of entities and situations * as they evolve throughout a discourse .", "these neural representations have functional similarities to linguistic models of dynamic semantics : they support a linear readout of each entity \u2019 s current properties and relations , and can be manipulated with predictable effects on language generation .", "our results indicate that prediction in pretrained neural language models is supported , at least in part , by dynamic representations of meaning and implicit simulation of entity state , and that this behavior can be learned with only text as training data ."], "events": [{"event_type": "ITT", "arguments": [{"text": "neural language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["neural", "language", "models"], "offsets": [4, 5, 6]}], "trigger": {"text": "derive entirely", "tokens": ["derive", "entirely"], "offsets": [7, 8]}}, {"event_type": "WKS", "arguments": [{"text": "in bart and t5 transformer language models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "bart", "and", "t5", "transformer", "language", "models"], "offsets": [33, 34, 35, 36, 37, 38, 39]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [41]}, {"text": "contextual word representations", "nugget_type": "FEA", 
"argument_type": "Content", "tokens": ["contextual", "word", "representations"], "offsets": [43, 44, 45]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [42]}}, {"event_type": "FAC", "arguments": [{"text": "in pretrained neural language models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "pretrained", "neural", "language", "models"], "offsets": [107, 108, 109, 110, 111]}, {"text": "by dynamic representations of meaning and implicit simulation of entity state", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "dynamic", "representations", "of", "meaning", "and", "implicit", "simulation", "of", "entity", "state"], "offsets": [120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130]}, {"text": "prediction", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["prediction"], "offsets": [106]}, {"text": "at least in part", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["at", "least", "in", "part"], "offsets": [115, 116, 117, 118]}], "trigger": {"text": "supported", "tokens": ["supported"], "offsets": [113]}}, {"event_type": "FAC", "arguments": [{"text": "with only text as training data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "only", "text", "as", "training", "data"], "offsets": [139, 140, 141, 142, 143, 144]}, {"text": "prediction in pretrained neural language models is supported", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["prediction", "in", "pretrained", "neural", "language", "models", "is", "supported"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113]}], "trigger": {"text": "learned", "tokens": ["learned"], "offsets": [138]}}], "document": ["does", "the", "effectiveness", "of", "neural", "language", "models", "derive", "entirely", "from", "accurate", "modeling", "of", "surface", "word", "co", "-", "occurrence", "statistics", ",", "or", "do", "these", "models", "represent", "and", "reason", "about", "the", "world", "they", 
"describe", "?", "in", "bart", "and", "t5", "transformer", "language", "models", ",", "we", "identify", "contextual", "word", "representations", "that", "function", "as", "*", "models", "of", "entities", "and", "situations", "*", "as", "they", "evolve", "throughout", "a", "discourse", ".", "these", "neural", "representations", "have", "functional", "similarities", "to", "linguistic", "models", "of", "dynamic", "semantics", ":", "they", "support", "a", "linear", "readout", "of", "each", "entity", "\u2019", "s", "current", "properties", "and", "relations", ",", "and", "can", "be", "manipulated", "with", "predictable", "effects", "on", "language", "generation", ".", "our", "results", "indicate", "that", "prediction", "in", "pretrained", "neural", "language", "models", "is", "supported", ",", "at", "least", "in", "part", ",", "by", "dynamic", "representations", "of", "meaning", "and", "implicit", "simulation", "of", "entity", "state", ",", "and", "that", "this", "behavior", "can", "be", "learned", "with", "only", "text", "as", "training", "data", "."]}, {"venue": "ACL", "title": "Breaking Through the 80% Glass Ceiling: Raising the State of the Art in Word Sense Disambiguation by Incorporating Knowledge Graph Information", "abstract": "Neural architectures are the current state of the art in Word Sense Disambiguation (WSD). However, they make limited use of the vast amount of relational information encoded in Lexical Knowledge Bases (LKB). We present Enhanced WSD Integrating Synset Embeddings and Relations (EWISER), a neural supervised architecture that is able to tap into this wealth of knowledge by embedding information from the LKB graph within the neural architecture, and to exploit pretrained synset embeddings, enabling the network to predict synsets that are not in the training set. 
As a result, we set a new state of the art on almost all the evaluation settings considered, also breaking through, for the first time, the 80% ceiling on the concatenation of all the standard all-words English WSD evaluation benchmarks. On multilingual all-words WSD, we report state-of-the-art results by training on nothing but English.", "doc_id": "f7d66e3283bc05777fe3899415c13e34", "publication_year": 2020, "sentences": ["neural architectures are the current state of the art in word sense disambiguation ( wsd ) .", "however , they make limited use of the vast amount of relational information encoded in lexical knowledge bases ( lkb ) .", "we present enhanced wsd integrating synset embeddings and relations ( ewiser ) , a neural supervised architecture that is able to tap into this wealth of knowledge by embedding information from the lkb graph within the neural architecture , and to exploit pretrained synset embeddings , enabling the network to predict synsets that are not in the training set .", "as a result , we set a new state of the art on almost all the evaluation settings considered , also breaking through , for the first time , the 80 % ceiling on the concatenation of all the standard all - words english wsd evaluation benchmarks .", "on multilingual all - words wsd , we report state - of - the - art results by training on nothing but english ."], "events": [{"event_type": "ITT", "arguments": [{"text": "word sense disambiguation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["word", "sense", "disambiguation"], "offsets": [10, 11, 12]}], "trigger": {"text": "art", "tokens": ["art"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "neural architectures", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["neural", "architectures"], "offsets": [0, 1]}, {"text": "lexical knowledge bases", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["lexical", "knowledge", "bases"], "offsets": [32, 33, 34]}, {"text": 
"make limited use of", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["make", "limited", "use", "of"], "offsets": [20, 21, 22, 23]}], "trigger": {"text": "make limited use of", "tokens": ["make", "limited", "use", "of"], "offsets": [20, 21, 22, 23]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [39]}, {"text": "enhanced wsd integrating synset embeddings and relations", "nugget_type": "APP", "argument_type": "Content", "tokens": ["enhanced", "wsd", "integrating", "synset", "embeddings", "and", "relations"], "offsets": [41, 42, 43, 44, 45, 46, 47]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [40]}}, {"event_type": "MDS", "arguments": [{"text": "tap", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["tap"], "offsets": [60]}, {"text": "information from the lkb graph", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["information", "from", "the", "lkb", "graph"], "offsets": [68, 69, 70, 71, 72]}, {"text": "neural architecture", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["neural", "architecture"], "offsets": [75, 76]}], "trigger": {"text": "embedding", "tokens": ["embedding"], "offsets": [67]}}, {"event_type": "PUR", "arguments": [{"text": "knowledge", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["knowledge"], "offsets": [65]}], "trigger": {"text": "tap", "tokens": ["tap"], "offsets": [60]}}, {"event_type": "MDS", "arguments": [{"text": "pretrained synset embeddings", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["pretrained", "synset", "embeddings"], "offsets": [81, 82, 83]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [89]}], "trigger": {"text": "exploit", "tokens": ["exploit"], "offsets": [80]}}, {"event_type": "PUR", "arguments": [{"text": "synsets that are not in the training set", "nugget_type": 
"FEA", "argument_type": "Aim", "tokens": ["synsets", "that", "are", "not", "in", "the", "training", "set"], "offsets": [90, 91, 92, 93, 94, 95, 96, 97]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [89]}}, {"event_type": "FAC", "arguments": [{"text": "state of the art", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "of", "the", "art"], "offsets": [107, 108, 109, 110]}, {"text": "enhanced wsd integrating synset embeddings and relations", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["enhanced", "wsd", "integrating", "synset", "embeddings", "and", "relations"], "offsets": [41, 42, 43, 44, 45, 46, 47]}, {"text": "on almost all the evaluation settings considered", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "almost", "all", "the", "evaluation", "settings", "considered"], "offsets": [111, 112, 113, 114, 115, 116, 117]}], "trigger": {"text": "set", "tokens": ["set"], "offsets": [104]}}, {"event_type": "FAC", "arguments": [{"text": "80 % ceiling", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["80", "%", "ceiling"], "offsets": [129, 130, 131]}, {"text": "on the concatenation of all the standard all - words english wsd evaluation benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "concatenation", "of", "all", "the", "standard", "all", "-", "words", "english", "word", "sense", "disambiguation", "evaluation", "benchmarks"], "offsets": [132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 10, 11, 12, 144, 145]}, {"text": "enhanced wsd integrating synset embeddings and relations", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["enhanced", "wsd", "integrating", "synset", "embeddings", "and", "relations"], "offsets": [41, 42, 43, 44, 45, 46, 47]}], "trigger": {"text": "breaking", "tokens": ["breaking"], "offsets": [120]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", 
"tokens": ["we"], "offsets": [154]}, {"text": "state - of - the - art results", "nugget_type": "STR", "argument_type": "Content", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [156, 157, 158, 159, 160, 161, 162, 163]}, {"text": "by training on nothing but english", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "training", "on", "nothing", "but", "english"], "offsets": [164, 165, 166, 167, 168, 169]}, {"text": "multilingual all - words wsd", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multilingual", "all", "-", "words", "wsd"], "offsets": [148, 149, 150, 151, 152]}], "trigger": {"text": "report", "tokens": ["report"], "offsets": [155]}}], "document": ["neural", "architectures", "are", "the", "current", "state", "of", "the", "art", "in", "word", "sense", "disambiguation", "(", "wsd", ")", ".", "however", ",", "they", "make", "limited", "use", "of", "the", "vast", "amount", "of", "relational", "information", "encoded", "in", "lexical", "knowledge", "bases", "(", "lkb", ")", ".", "we", "present", "enhanced", "wsd", "integrating", "synset", "embeddings", "and", "relations", "(", "ewiser", ")", ",", "a", "neural", "supervised", "architecture", "that", "is", "able", "to", "tap", "into", "this", "wealth", "of", "knowledge", "by", "embedding", "information", "from", "the", "lkb", "graph", "within", "the", "neural", "architecture", ",", "and", "to", "exploit", "pretrained", "synset", "embeddings", ",", "enabling", "the", "network", "to", "predict", "synsets", "that", "are", "not", "in", "the", "training", "set", ".", "as", "a", "result", ",", "we", "set", "a", "new", "state", "of", "the", "art", "on", "almost", "all", "the", "evaluation", "settings", "considered", ",", "also", "breaking", "through", ",", "for", "the", "first", "time", ",", "the", "80", "%", "ceiling", "on", "the", "concatenation", "of", "all", "the", "standard", "all", "-", "words", "english", "wsd", "evaluation", "benchmarks", ".", 
"on", "multilingual", "all", "-", "words", "wsd", ",", "we", "report", "state", "-", "of", "-", "the", "-", "art", "results", "by", "training", "on", "nothing", "but", "english", "."]}, {"venue": "ACL", "title": "Probing Simile Knowledge from Pre-trained Language Models", "abstract": "Simile interpretation (SI) and simile generation (SG) are challenging tasks for NLP because models require adequate world knowledge to produce predictions. Previous works have employed many hand-crafted resources to bring knowledge-related into models, which is time-consuming and labor-intensive. In recent years, pre-trained language models (PLMs) based approaches have become the de-facto standard in NLP since they learn generic knowledge from a large corpus. The knowledge embedded in PLMs may be useful for SI and SG tasks. Nevertheless, there are few works to explore it. In this paper, we probe simile knowledge from PLMs to solve the SI and SG tasks in the unified framework of simile triple completion for the first time. The backbone of our framework is to construct masked sentences with manual patterns and then predict the candidate words in the masked position. In this framework, we adopt a secondary training process (Adjective-Noun mask Training) with the masked language model (MLM) loss to enhance the prediction diversity of candidate words in the masked position. Moreover, pattern ensemble (PE) and pattern search (PS) are applied to improve the quality of predicted words. 
Finally, automatic and human evaluations demonstrate the effectiveness of our framework in both SI and SG tasks.", "doc_id": "f1b4a45cf4f49b209cdb34779dd3d0d9", "publication_year": 2022, "sentences": ["simile interpretation ( si ) and simile generation ( sg ) are challenging tasks for nlp because models require adequate world knowledge to produce predictions .", "previous works have employed many hand - crafted resources to bring knowledge - related into models , which is time - consuming and labor - intensive .", "in recent years , pre - trained language models ( plms ) based approaches have become the de - facto standard in nlp since they learn generic knowledge from a large corpus .", "the knowledge embedded in plms may be useful for si and sg tasks .", "nevertheless , there are few works to explore it .", "in this paper , we probe simile knowledge from plms to solve the si and sg tasks in the unified framework of simile triple completion for the first time .", "the backbone of our framework is to construct masked sentences with manual patterns and then predict the candidate words in the masked position .", "in this framework , we adopt a secondary training process ( adjective - noun mask training ) with the masked language model ( mlm ) loss to enhance the prediction diversity of candidate words in the masked position .", "moreover , pattern ensemble ( pe ) and pattern search ( ps ) are applied to improve the quality of predicted words .", "finally , automatic and human evaluations demonstrate the effectiveness of our framework in both si and sg tasks ."], "events": [{"event_type": "ITT", "arguments": [{"text": "simile interpretation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["simile", "interpretation"], "offsets": [0, 1]}, {"text": "simile generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sg"], "offsets": [125]}], "trigger": {"text": "tasks", "tokens": ["tasks"], "offsets": [13]}}, {"event_type": "RWS", "arguments": 
[{"text": "models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["models"], "offsets": [41]}, {"text": "knowledge - related", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["knowledge", "-", "related"], "offsets": [37, 38, 39]}], "trigger": {"text": "bring", "tokens": ["bring"], "offsets": [36]}}, {"event_type": "RWF", "arguments": [{"text": "time - consuming", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["time", "-", "consuming"], "offsets": [45, 46, 47]}], "trigger": {"text": "time - consuming", "tokens": ["time", "-", "consuming"], "offsets": [45, 46, 47]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [114]}, {"text": "solve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["solve"], "offsets": [121]}, {"text": "simile knowledge from plms", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["simile", "knowledge", "from", "plms"], "offsets": [116, 117, 118, 119]}, {"text": "in the unified framework of simile triple completion", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "unified", "framework", "of", "simile", "triple", "completion"], "offsets": [127, 128, 129, 130, 131, 132, 133, 134]}], "trigger": {"text": "probe", "tokens": ["probe"], "offsets": [115]}}, {"event_type": "PUR", "arguments": [{"text": "si and sg tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["simile", "interpretation", "and", "sg", "tasks"], "offsets": [0, 1, 124, 125, 126]}], "trigger": {"text": "solve", "tokens": ["solve"], "offsets": [121]}}, {"event_type": "MDS", "arguments": [{"text": "masked sentences", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["masked", "sentences"], "offsets": [148, 149]}, {"text": "with manual patterns", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "manual", "patterns"], "offsets": [150, 151, 152]}], 
"trigger": {"text": "construct", "tokens": ["construct"], "offsets": [147]}}, {"event_type": "MDS", "arguments": [{"text": "candidate words", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["candidate", "words"], "offsets": [157, 158]}, {"text": "in the masked position", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "masked", "position"], "offsets": [159, 160, 161, 162]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [155]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [168]}, {"text": "secondary training process", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["secondary", "training", "process"], "offsets": [171, 172, 173]}, {"text": "masked language model ( mlm ) loss", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["masked", "language", "model", "loss"], "offsets": [183, 184, 185, 189]}, {"text": "enhance", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhance"], "offsets": [191]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [169]}}, {"event_type": "PUR", "arguments": [{"text": "in the masked position", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "masked", "position"], "offsets": [198, 199, 200, 201]}, {"text": "prediction diversity of candidate words", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["prediction", "diversity", "of", "candidate", "words"], "offsets": [193, 194, 195, 196, 197]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [191]}}, {"event_type": "WKS", "arguments": [{"text": "pattern ensemble", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pattern", "ensemble"], "offsets": [205, 206]}, {"text": "pattern search", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pattern", "search"], "offsets": [211, 212]}, {"text": "improve", "nugget_type": 
"E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [219]}], "trigger": {"text": "applied", "tokens": ["applied"], "offsets": [217]}}, {"event_type": "PUR", "arguments": [{"text": "quality of predicted words", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["quality", "of", "predicted", "words"], "offsets": [221, 222, 223, 224]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [219]}}, {"event_type": "FAC", "arguments": [{"text": "effectiveness of our framework", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["effectiveness", "of", "our", "framework"], "offsets": [234, 235, 236, 237]}, {"text": "si tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["simile", "interpretation", "tasks"], "offsets": [0, 1, 243]}, {"text": "sg tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sg", "tasks"], "offsets": [242, 243]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [232]}}], "document": ["simile", "interpretation", "(", "si", ")", "and", "simile", "generation", "(", "sg", ")", "are", "challenging", "tasks", "for", "nlp", "because", "models", "require", "adequate", "world", "knowledge", "to", "produce", "predictions", ".", "previous", "works", "have", "employed", "many", "hand", "-", "crafted", "resources", "to", "bring", "knowledge", "-", "related", "into", "models", ",", "which", "is", "time", "-", "consuming", "and", "labor", "-", "intensive", ".", "in", "recent", "years", ",", "pre", "-", "trained", "language", "models", "(", "plms", ")", "based", "approaches", "have", "become", "the", "de", "-", "facto", "standard", "in", "nlp", "since", "they", "learn", "generic", "knowledge", "from", "a", "large", "corpus", ".", "the", "knowledge", "embedded", "in", "plms", "may", "be", "useful", "for", "si", "and", "sg", "tasks", ".", "nevertheless", ",", "there", "are", "few", "works", "to", "explore", "it", ".", "in", "this", "paper", ",", "we", "probe", 
"simile", "knowledge", "from", "plms", "to", "solve", "the", "si", "and", "sg", "tasks", "in", "the", "unified", "framework", "of", "simile", "triple", "completion", "for", "the", "first", "time", ".", "the", "backbone", "of", "our", "framework", "is", "to", "construct", "masked", "sentences", "with", "manual", "patterns", "and", "then", "predict", "the", "candidate", "words", "in", "the", "masked", "position", ".", "in", "this", "framework", ",", "we", "adopt", "a", "secondary", "training", "process", "(", "adjective", "-", "noun", "mask", "training", ")", "with", "the", "masked", "language", "model", "(", "mlm", ")", "loss", "to", "enhance", "the", "prediction", "diversity", "of", "candidate", "words", "in", "the", "masked", "position", ".", "moreover", ",", "pattern", "ensemble", "(", "pe", ")", "and", "pattern", "search", "(", "ps", ")", "are", "applied", "to", "improve", "the", "quality", "of", "predicted", "words", ".", "finally", ",", "automatic", "and", "human", "evaluations", "demonstrate", "the", "effectiveness", "of", "our", "framework", "in", "both", "si", "and", "sg", "tasks", "."]}, {"venue": "ACL", "title": "Style is NOT a single variable: Case Studies for Cross-Stylistic Language Understanding", "abstract": "Every natural text is written in some style. Style is formed by a complex combination of different stylistic factors, including formality markers, emotions, metaphors, etc. One cannot form a complete understanding of a text without considering these factors. The factors combine and co-vary in complex ways to form styles. Studying the nature of the covarying combinations sheds light on stylistic language in general, sometimes called cross-style language understanding. This paper provides the benchmark corpus (XSLUE) that combines existing datasets and collects a new one for sentence-level cross-style language understanding and evaluation. 
The benchmark contains text in 15 different styles under the proposed four theoretical groupings: figurative, personal, affective, and interpersonal groups. For valid evaluation, we collect an additional diagnostic set by annotating all 15 styles on the same text. Using XSLUE, we propose three interesting cross-style applications in classification, correlation, and generation. First, our proposed cross-style classifier trained with multiple styles together helps improve overall classification performance against individually-trained style classifiers. Second, our study shows that some styles are highly dependent on each other in human-written text. Finally, we find that combinations of some contradictive styles likely generate stylistically less appropriate text. We believe our benchmark and case studies help explore interesting future directions for cross-style research. The preprocessed datasets and code are publicly available.", "doc_id": "62b9ced26466d7b28d870b75bcc3acb7", "publication_year": 2021, "sentences": ["every natural text is written in some style .", "style is formed by a complex combination of different stylistic factors , including formality markers , emotions , metaphors , etc .", "one cannot form a complete understanding of a text without considering these factors .", "the factors combine and co - vary in complex ways to form styles .", "studying the nature of the covarying combinations sheds light on stylistic language in general , sometimes called cross - style language understanding .", "this paper provides the benchmark corpus ( xslue ) that combines existing datasets and collects a new one for sentence - level cross - style language understanding and evaluation .", "the benchmark contains text in 15 different styles under the proposed four theoretical groupings : figurative , personal , affective , and interpersonal groups .", "for valid evaluation , we collect an additional diagnostic set by annotating all 15 styles on the same text .", 
"using xslue , we propose three interesting cross - style applications in classification , correlation , and generation .", "first , our proposed cross - style classifier trained with multiple styles together helps improve overall classification performance against individually - trained style classifiers .", "second , our study shows that some styles are highly dependent on each other in human - written text .", "finally , we find that combinations of some contradictive styles likely generate stylistically less appropriate text .", "we believe our benchmark and case studies help explore interesting future directions for cross - style research .", "the preprocessed datasets and code are publicly available ."], "events": [{"event_type": "ITT", "arguments": [{"text": "cross - style language understanding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["cross", "-", "style", "language", "understanding"], "offsets": [76, 77, 78, 79, 80]}], "trigger": {"text": "called", "tokens": ["called"], "offsets": [75]}}, {"event_type": "PRP", "arguments": [{"text": "benchmark corpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["benchmark", "corpus"], "offsets": [86, 87]}, {"text": "sentence - level cross - style language understanding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sentence", "-", "level", "cross", "-", "style", "language", "understanding"], "offsets": [101, 102, 103, 104, 105, 106, 107, 108]}, {"text": "sentence - level cross - style language evaluation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sentence", "-", "level", "cross", "-", "style", "language", "evaluation"], "offsets": [101, 102, 103, 104, 105, 106, 107, 110]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [84]}}, {"event_type": "WKS", "arguments": [{"text": "existing datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["existing", "datasets"], "offsets": [93, 94]}], "trigger": {"text": 
"combines", "tokens": ["combines"], "offsets": [92]}}, {"event_type": "WKS", "arguments": [{"text": "benchmark corpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["benchmark", "corpus"], "offsets": [86, 87]}], "trigger": {"text": "collects", "tokens": ["collects"], "offsets": [96]}}, {"event_type": "FAC", "arguments": [{"text": "text in 15 different styles", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["text", "in", "15", "different", "styles"], "offsets": [115, 116, 117, 118, 119]}, {"text": "benchmark", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["benchmark"], "offsets": [113]}, {"text": "under the proposed four theoretical groupings : figurative , personal , affective , and interpersonal groups", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "the", "proposed", "four", "theoretical", "groupings", ":", "figurative", ",", "personal", ",", "affective", ",", "and", "interpersonal", "groups"], "offsets": [120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135]}], "trigger": {"text": "contains", "tokens": ["contains"], "offsets": [114]}}, {"event_type": "WKS", "arguments": [{"text": "valid evaluation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["valid", "evaluation"], "offsets": [138, 139]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [141]}, {"text": "additional diagnostic set", "nugget_type": "DST", "argument_type": "Content", "tokens": ["additional", "diagnostic", "set"], "offsets": [144, 145, 146]}, {"text": "by annotating all 15 styles on the same text", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "annotating", "all", "15", "styles", "on", "the", "same", "text"], "offsets": [147, 148, 149, 150, 151, 152, 153, 154, 155]}], "trigger": {"text": "collect", "tokens": ["collect"], "offsets": [142]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", 
"argument_type": "Proposer", "tokens": ["we"], "offsets": [160]}, {"text": "three interesting cross - style applications", "nugget_type": "APP", "argument_type": "Content", "tokens": ["three", "interesting", "cross", "-", "style", "applications"], "offsets": [162, 163, 164, 165, 166, 167]}, {"text": "classification", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["classification"], "offsets": [169]}, {"text": "correlation", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["correlation"], "offsets": [171]}, {"text": "generation", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["generation"], "offsets": [174]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [161]}}, {"event_type": "MDS", "arguments": [{"text": "cross - style classifier", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["cross", "-", "style", "classifier"], "offsets": [180, 181, 182, 183]}, {"text": "with multiple styles", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "multiple", "styles"], "offsets": [185, 186, 187]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [190]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [184]}}, {"event_type": "PUR", "arguments": [{"text": "overall classification performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["overall", "classification", "performance"], "offsets": [191, 192, 193]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [190]}}, {"event_type": "FAC", "arguments": [{"text": "highly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["highly"], "offsets": [210]}, {"text": "styles", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["styles"], "offsets": [208]}, {"text": "in human - written text", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "human", "-", "written", "text"], "offsets": [215, 216, 217, 218, 
219]}], "trigger": {"text": "dependent on", "tokens": ["dependent", "on"], "offsets": [211, 212]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [223]}, {"text": "generate", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["generate"], "offsets": [232]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [224]}}, {"event_type": "FAC", "arguments": [{"text": "stylistically less appropriate text", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["stylistically", "less", "appropriate", "text"], "offsets": [233, 234, 235, 236]}, {"text": "combinations of some contradictive styles", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["combinations", "of", "some", "contradictive", "styles"], "offsets": [226, 227, 228, 229, 230]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [232]}}, {"event_type": "FAC", "arguments": [{"text": "interesting future directions", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["interesting", "future", "directions"], "offsets": [247, 248, 249]}, {"text": "cross - style research", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["cross", "-", "style", "research"], "offsets": [251, 252, 253, 254]}, {"text": "benchmark studies", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["benchmark", "studies"], "offsets": [241, 244]}, {"text": "case studies", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["case", "studies"], "offsets": [243, 244]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [246]}}, {"event_type": "FAC", "arguments": [{"text": "publicly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["publicly"], "offsets": [262]}, {"text": "preprocessed datasets and code", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["preprocessed", "datasets", "and", "code"], "offsets": [257, 258, 259, 260]}], "trigger": 
{"text": "available", "tokens": ["available"], "offsets": [263]}}], "document": ["every", "natural", "text", "is", "written", "in", "some", "style", ".", "style", "is", "formed", "by", "a", "complex", "combination", "of", "different", "stylistic", "factors", ",", "including", "formality", "markers", ",", "emotions", ",", "metaphors", ",", "etc", ".", "one", "cannot", "form", "a", "complete", "understanding", "of", "a", "text", "without", "considering", "these", "factors", ".", "the", "factors", "combine", "and", "co", "-", "vary", "in", "complex", "ways", "to", "form", "styles", ".", "studying", "the", "nature", "of", "the", "covarying", "combinations", "sheds", "light", "on", "stylistic", "language", "in", "general", ",", "sometimes", "called", "cross", "-", "style", "language", "understanding", ".", "this", "paper", "provides", "the", "benchmark", "corpus", "(", "xslue", ")", "that", "combines", "existing", "datasets", "and", "collects", "a", "new", "one", "for", "sentence", "-", "level", "cross", "-", "style", "language", "understanding", "and", "evaluation", ".", "the", "benchmark", "contains", "text", "in", "15", "different", "styles", "under", "the", "proposed", "four", "theoretical", "groupings", ":", "figurative", ",", "personal", ",", "affective", ",", "and", "interpersonal", "groups", ".", "for", "valid", "evaluation", ",", "we", "collect", "an", "additional", "diagnostic", "set", "by", "annotating", "all", "15", "styles", "on", "the", "same", "text", ".", "using", "xslue", ",", "we", "propose", "three", "interesting", "cross", "-", "style", "applications", "in", "classification", ",", "correlation", ",", "and", "generation", ".", "first", ",", "our", "proposed", "cross", "-", "style", "classifier", "trained", "with", "multiple", "styles", "together", "helps", "improve", "overall", "classification", "performance", "against", "individually", "-", "trained", "style", "classifiers", ".", "second", ",", "our", "study", "shows", "that", "some", "styles", 
"are", "highly", "dependent", "on", "each", "other", "in", "human", "-", "written", "text", ".", "finally", ",", "we", "find", "that", "combinations", "of", "some", "contradictive", "styles", "likely", "generate", "stylistically", "less", "appropriate", "text", ".", "we", "believe", "our", "benchmark", "and", "case", "studies", "help", "explore", "interesting", "future", "directions", "for", "cross", "-", "style", "research", ".", "the", "preprocessed", "datasets", "and", "code", "are", "publicly", "available", "."]}, {"venue": "ACL", "title": "A Complete Shift-Reduce Chinese Discourse Parser with Robust Dynamic Oracle", "abstract": "This work proposes a standalone, complete Chinese discourse parser for practical applications. We approach Chinese discourse parsing from a variety of aspects and improve the shift-reduce parser not only by integrating the pre-trained text encoder, but also by employing novel training strategies. We revise the dynamic-oracle procedure for training the shift-reduce parser, and apply unsupervised data augmentation to enhance rhetorical relation recognition. 
Experimental results show that our Chinese discourse parser achieves the state-of-the-art performance.", "doc_id": "8501b66c1f8d6534c71d3376cc5aa925", "publication_year": 2020, "sentences": ["this work proposes a standalone , complete chinese discourse parser for practical applications .", "we approach chinese discourse parsing from a variety of aspects and improve the shift - reduce parser not only by integrating the pre - trained text encoder , but also by employing novel training strategies .", "we revise the dynamic - oracle procedure for training the shift - reduce parser , and apply unsupervised data augmentation to enhance rhetorical relation recognition .", "experimental results show that our chinese discourse parser achieves the state - of - the - art performance ."], "events": [{"event_type": "PRP", "arguments": [{"text": "chinese discourse parser", "nugget_type": "APP", "argument_type": "Content", "tokens": ["chinese", "discourse", "parser"], "offsets": [7, 8, 9]}, {"text": "practical applications", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["practical", "applications"], "offsets": [11, 12]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [2]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [50]}, {"text": "dynamic - oracle procedure", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["dynamic", "-", "oracle", "procedure"], "offsets": [53, 54, 55, 56]}, {"text": "training", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["training"], "offsets": [58]}], "trigger": {"text": "revise", "tokens": ["revise"], "offsets": [51]}}, {"event_type": "PUR", "arguments": [{"text": "shift - reduce parser", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["shift", "-", "reduce", "parser"], "offsets": [60, 61, 62, 63]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [58]}}, {"event_type": "WKS", 
"arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [50]}, {"text": "unsupervised data augmentation", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unsupervised", "data", "augmentation"], "offsets": [67, 68, 69]}, {"text": "enhance", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhance"], "offsets": [71]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [66]}}, {"event_type": "PUR", "arguments": [{"text": "rhetorical relation recognition", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["rhetorical", "relation", "recognition"], "offsets": [72, 73, 74]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [71]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [84]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [78]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [86, 87, 88, 89, 90, 91, 92, 93]}, {"text": "chinese discourse parser", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["chinese", "discourse", "parser"], "offsets": [7, 8, 9]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [84]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [14]}, {"text": "chinese discourse parsing", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["chinese", "discourse", "parsing"], "offsets": [16, 17, 18]}, {"text": "from a variety of aspects", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "a", "variety", "of", "aspects"], "offsets": [19, 20, 21, 22, 23]}], "trigger": {"text": "approach", "tokens": ["approach"], "offsets": 
[15]}}, {"event_type": "MDS", "arguments": [{"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [25]}, {"text": "pre - trained text encoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["pre", "-", "trained", "text", "encoder"], "offsets": [36, 37, 38, 39, 40]}], "trigger": {"text": "integrating", "tokens": ["integrating"], "offsets": [34]}}, {"event_type": "MDS", "arguments": [{"text": "novel training strategies", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["novel", "training", "strategies"], "offsets": [46, 47, 48]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [25]}], "trigger": {"text": "employing", "tokens": ["employing"], "offsets": [45]}}, {"event_type": "PUR", "arguments": [{"text": "shift - reduce parser", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["shift", "-", "reduce", "parser"], "offsets": [27, 28, 29, 30]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [25]}}], "document": ["this", "work", "proposes", "a", "standalone", ",", "complete", "chinese", "discourse", "parser", "for", "practical", "applications", ".", "we", "approach", "chinese", "discourse", "parsing", "from", "a", "variety", "of", "aspects", "and", "improve", "the", "shift", "-", "reduce", "parser", "not", "only", "by", "integrating", "the", "pre", "-", "trained", "text", "encoder", ",", "but", "also", "by", "employing", "novel", "training", "strategies", ".", "we", "revise", "the", "dynamic", "-", "oracle", "procedure", "for", "training", "the", "shift", "-", "reduce", "parser", ",", "and", "apply", "unsupervised", "data", "augmentation", "to", "enhance", "rhetorical", "relation", "recognition", ".", "experimental", "results", "show", "that", "our", "chinese", "discourse", "parser", "achieves", "the", "state", "-", "of", "-", "the", "-", "art", "performance", "."]}, {"venue": "ACL", "title": "An 
Investigation of Transfer Learning-Based Sentiment Analysis in Japanese", "abstract": "Text classification approaches have usually required task-specific model architectures and huge labeled datasets. Recently, thanks to the rise of text-based transfer learning techniques, it is possible to pre-train a language model in an unsupervised manner and leverage them to perform effective on downstream tasks. In this work we focus on Japanese and show the potential use of transfer learning techniques in text classification. Specifically, we perform binary and multi-class sentiment classification on the Rakuten product review and Yahoo movie review datasets. We show that transfer learning-based approaches perform better than task-specific models trained on 3 times as much data. Furthermore, these approaches perform just as well for language modeling pre-trained on only 1/30 of the data. We release our pre-trained models and code as open source.", "doc_id": "2a31957e808e575bbd56a90c15728881", "publication_year": 2019, "sentences": ["text classification approaches have usually required task - specific model architectures and huge labeled datasets .", "recently , thanks to the rise of text - based transfer learning techniques , it is possible to pre - train a language model in an unsupervised manner and leverage them to perform effective on downstream tasks .", "in this work we focus on japanese and show the potential use of transfer learning techniques in text classification .", "specifically , we perform binary and multi - class sentiment classification on the rakuten product review and yahoo movie review datasets .", "we show that transfer learning - based approaches perform better than task - specific models trained on 3 times as much data .", "furthermore , these approaches perform just as well for language modeling pre - trained on only 1 / 30 of the data .", "we release our pre - trained models and code as open source ."], "events": [{"event_type": "ITT", "arguments": 
[{"text": "text classification approaches", "nugget_type": "APP", "argument_type": "Target", "tokens": ["text", "classification", "approaches"], "offsets": [0, 1, 2]}], "trigger": {"text": "required", "tokens": ["required"], "offsets": [5]}}, {"event_type": "RWS", "arguments": [{"text": "language model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["language", "model"], "offsets": [38, 39]}, {"text": "in an unsupervised manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "unsupervised", "manner"], "offsets": [40, 41, 42, 43]}], "trigger": {"text": "pre - train", "tokens": ["pre", "-", "train"], "offsets": [34, 35, 36]}}], "document": ["text", "classification", "approaches", "have", "usually", "required", "task", "-", "specific", "model", "architectures", "and", "huge", "labeled", "datasets", ".", "recently", ",", "thanks", "to", "the", "rise", "of", "text", "-", "based", "transfer", "learning", "techniques", ",", "it", "is", "possible", "to", "pre", "-", "train", "a", "language", "model", "in", "an", "unsupervised", "manner", "and", "leverage", "them", "to", "perform", "effective", "on", "downstream", "tasks", ".", "in", "this", "work", "we", "focus", "on", "japanese", "and", "show", "the", "potential", "use", "of", "transfer", "learning", "techniques", "in", "text", "classification", ".", "specifically", ",", "we", "perform", "binary", "and", "multi", "-", "class", "sentiment", "classification", "on", "the", "rakuten", "product", "review", "and", "yahoo", "movie", "review", "datasets", ".", "we", "show", "that", "transfer", "learning", "-", "based", "approaches", "perform", "better", "than", "task", "-", "specific", "models", "trained", "on", "3", "times", "as", "much", "data", ".", "furthermore", ",", "these", "approaches", "perform", "just", "as", "well", "for", "language", "modeling", "pre", "-", "trained", "on", "only", "1", "/", "30", "of", "the", "data", ".", "we", "release", "our", "pre", "-", "trained", 
"models", "and", "code", "as", "open", "source", "."]}, {"venue": "ACL", "title": "A Knowledge-Guided Framework for Frame Identification", "abstract": "Frame Identification (FI) is a fundamental and challenging task in frame semantic parsing. The task aims to find the exact frame evoked by a target word in a given sentence. It is generally regarded as a classification task in existing work, where frames are treated as discrete labels or represented using onehot embeddings. However, the valuable knowledge about frames is neglected. In this paper, we propose a Knowledge-Guided Frame Identification framework (KGFI) that integrates three types frame knowledge, including frame definitions, frame elements and frame-to-frame relations, to learn better frame representation, which guides the KGFI to jointly map target words and frames into the same embedding space and subsequently identify the best frame by calculating the dot-product similarity scores between the target word embedding and all of the frame embeddings. 
The extensive experimental results demonstrate KGFI significantly outperforms the state-of-the-art methods on two benchmark datasets.", "doc_id": "f66ca99ff657163899bc443b3c598c62", "publication_year": 2021, "sentences": ["frame identification ( fi ) is a fundamental and challenging task in frame semantic parsing .", "the task aims to find the exact frame evoked by a target word in a given sentence .", "it is generally regarded as a classification task in existing work , where frames are treated as discrete labels or represented using onehot embeddings .", "however , the valuable knowledge about frames is neglected .", "in this paper , we propose a knowledge - guided frame identification framework ( kgfi ) that integrates three types frame knowledge , including frame definitions , frame elements and frame - to - frame relations , to learn better frame representation , which guides the kgfi to jointly map target words and frames into the same embedding space and subsequently identify the best frame by calculating the dot - product similarity scores between the target word embedding and all of the frame embeddings .", "the extensive experimental results demonstrate kgfi significantly outperforms the state - of - the - art methods on two benchmark datasets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "frame semantic parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["frame", "semantic", "parsing"], "offsets": [12, 13, 14]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "neglected", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["neglected"], "offsets": [67]}], "trigger": {"text": "neglected", "tokens": ["neglected"], "offsets": [67]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [73]}, {"text": "knowledge - guided frame identification framework", "nugget_type": "APP", 
"argument_type": "Content", "tokens": ["knowledge", "-", "guided", "frame", "identification", "framework"], "offsets": [76, 77, 78, 79, 80, 81]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [74]}}, {"event_type": "MDS", "arguments": [{"text": "frame definitions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["frame", "definitions"], "offsets": [93, 94]}, {"text": "frame elements", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["frame", "elements"], "offsets": [96, 97]}, {"text": "frame - to - frame relations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["frame", "-", "to", "-", "frame", "relations"], "offsets": [99, 100, 101, 102, 103, 104]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [107]}], "trigger": {"text": "integrates", "tokens": ["integrates"], "offsets": [86]}}, {"event_type": "PUR", "arguments": [{"text": "better frame representation", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["better", "frame", "representation"], "offsets": [108, 109, 110]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [107]}}, {"event_type": "MDS", "arguments": [{"text": "target words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["target", "words"], "offsets": [119, 120]}, {"text": "frames", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["frames"], "offsets": [122]}, {"text": "same embedding space", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["same", "embedding", "space"], "offsets": [125, 126, 127]}], "trigger": {"text": "jointly map", "tokens": ["jointly", "map"], "offsets": [117, 118]}}, {"event_type": "MDS", "arguments": [{"text": "identify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["identify"], "offsets": [130]}, {"text": "dot - product similarity scores", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": 
["dot", "-", "product", "similarity", "scores"], "offsets": [137, 138, 139, 140, 141]}, {"text": "target word embedding", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["target", "word", "embedding"], "offsets": [144, 145, 146]}, {"text": "frame embeddings", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["frame", "embeddings"], "offsets": [151, 152]}], "trigger": {"text": "calculating", "tokens": ["calculating"], "offsets": [135]}}, {"event_type": "PUR", "arguments": [{"text": "best frame", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["best", "frame"], "offsets": [132, 133]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [130]}}, {"event_type": "FIN", "arguments": [{"text": "significantly outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["significantly", "outperforms"], "offsets": [160, 161]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [158]}}, {"event_type": "CMP", "arguments": [{"text": "knowledge - guided frame identification framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["knowledge", "-", "guided", "frame", "identification", "framework"], "offsets": [76, 77, 78, 79, 80, 81]}, {"text": "state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [163, 164, 165, 166, 167, 168, 169, 170]}, {"text": "two benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "benchmark", "datasets"], "offsets": [172, 173, 174]}], "trigger": {"text": "significantly outperforms", "tokens": ["significantly", "outperforms"], "offsets": [160, 161]}}], "document": ["frame", "identification", "(", "fi", ")", "is", "a", "fundamental", "and", "challenging", "task", "in", "frame", "semantic", "parsing", ".", "the", "task", "aims", "to", "find", "the", "exact", "frame", "evoked", "by", "a", "target", "word", 
"in", "a", "given", "sentence", ".", "it", "is", "generally", "regarded", "as", "a", "classification", "task", "in", "existing", "work", ",", "where", "frames", "are", "treated", "as", "discrete", "labels", "or", "represented", "using", "onehot", "embeddings", ".", "however", ",", "the", "valuable", "knowledge", "about", "frames", "is", "neglected", ".", "in", "this", "paper", ",", "we", "propose", "a", "knowledge", "-", "guided", "frame", "identification", "framework", "(", "kgfi", ")", "that", "integrates", "three", "types", "frame", "knowledge", ",", "including", "frame", "definitions", ",", "frame", "elements", "and", "frame", "-", "to", "-", "frame", "relations", ",", "to", "learn", "better", "frame", "representation", ",", "which", "guides", "the", "kgfi", "to", "jointly", "map", "target", "words", "and", "frames", "into", "the", "same", "embedding", "space", "and", "subsequently", "identify", "the", "best", "frame", "by", "calculating", "the", "dot", "-", "product", "similarity", "scores", "between", "the", "target", "word", "embedding", "and", "all", "of", "the", "frame", "embeddings", ".", "the", "extensive", "experimental", "results", "demonstrate", "kgfi", "significantly", "outperforms", "the", "state", "-", "of", "-", "the", "-", "art", "methods", "on", "two", "benchmark", "datasets", "."]}, {"venue": "ACL", "title": "Relation Embedding with Dihedral Group in Knowledge Graph", "abstract": "Link prediction is critical for the application of incomplete knowledge graph (KG) in the downstream tasks. As a family of effective approaches for link predictions, embedding methods try to learn low-rank representations for both entities and relations such that the bilinear form defined therein is a well-behaved scoring function. Despite of their successful performances, existing bilinear forms overlook the modeling of relation compositions, resulting in lacks of interpretability for reasoning on KG. 
To fulfill this gap, we propose a new model called DihEdral, named after dihedral symmetry group. This new model learns knowledge graph embeddings that can capture relation compositions by nature. Furthermore, our approach models the relation embeddings parametrized by discrete values, thereby decrease the solution space drastically. Our experiments show that DihEdral is able to capture all desired properties such as (skew-) symmetry, inversion and (non-) Abelian composition, and outperforms existing bilinear form based approach and is comparable to or better than deep learning models such as ConvE.", "doc_id": "f7c35cad2b20211834b3c94ceb83362d", "publication_year": 2019, "sentences": ["link prediction is critical for the application of incomplete knowledge graph ( kg ) in the downstream tasks .", "as a family of effective approaches for link predictions , embedding methods try to learn low - rank representations for both entities and relations such that the bilinear form defined therein is a well - behaved scoring function .", "despite of their successful performances , existing bilinear forms overlook the modeling of relation compositions , resulting in lacks of interpretability for reasoning on kg .", "to fulfill this gap , we propose a new model called dihedral , named after dihedral symmetry group .", "this new model learns knowledge graph embeddings that can capture relation compositions by nature .", "furthermore , our approach models the relation embeddings parametrized by discrete values , thereby decrease the solution space drastically .", "our experiments show that dihedral is able to capture all desired properties such as ( skew - ) symmetry , inversion and ( non - )", "abelian composition , and outperforms existing bilinear form based approach and is comparable to or better than deep learning models such as conve ."], "events": [{"event_type": "ITT", "arguments": [{"text": "incomplete knowledge graph", "nugget_type": "FEA", "argument_type": "Target", 
"tokens": ["incomplete", "knowledge", "graph"], "offsets": [8, 9, 10]}], "trigger": {"text": "critical", "tokens": ["critical"], "offsets": [3]}}, {"event_type": "RWS", "arguments": [{"text": "embedding methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["embedding", "methods"], "offsets": [29, 30]}, {"text": "low - rank representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["low", "-", "rank", "representations"], "offsets": [34, 35, 36, 37]}, {"text": "entities", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["entities"], "offsets": [40]}, {"text": "relations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relations"], "offsets": [42]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [33]}}, {"event_type": "RWF", "arguments": [{"text": "existing bilinear forms", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "bilinear", "forms"], "offsets": [64, 65, 66]}, {"text": "modeling of relation compositions", "nugget_type": "APP", "argument_type": "Fault", "tokens": ["modeling", "of", "relation", "compositions"], "offsets": [69, 70, 71, 72]}], "trigger": {"text": "overlook", "tokens": ["overlook"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "knowledge graph embeddings", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["knowledge", "graph", "embeddings"], "offsets": [107, 108, 109]}, {"text": "relation compositions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relation", "compositions"], "offsets": [113, 114]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [112]}}, {"event_type": "MDS", "arguments": [{"text": "relation embeddings", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relation", "embeddings"], "offsets": [124, 125]}, {"text": "decrease", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["decrease"], "offsets": [132]}], 
"trigger": {"text": "models", "tokens": ["models"], "offsets": [122]}}, {"event_type": "PUR", "arguments": [{"text": "solution space", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["solution", "space"], "offsets": [134, 135]}], "trigger": {"text": "decrease", "tokens": ["decrease"], "offsets": [132]}}, {"event_type": "FIN", "arguments": [{"text": "capture", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["capture"], "offsets": [146]}, {"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [168]}, {"text": "comparable to or better", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["comparable", "to", "or", "better"], "offsets": [176, 177, 178, 179]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [140]}}, {"event_type": "FAC", "arguments": [{"text": "all desired properties", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["all", "desired", "properties"], "offsets": [147, 148, 149]}, {"text": "dihedral", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["dihedral"], "offsets": [142]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [146]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [168]}, {"text": "existing bilinear form based approach", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "bilinear", "form", "based", "approach"], "offsets": [169, 170, 171, 172, 173]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [168]}}, {"event_type": "CMP", "arguments": [{"text": "deep learning models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["deep", "learning", "models"], "offsets": [181, 182, 183]}, {"text": "comparable to or better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["comparable", "to", "or", "better"], "offsets": [176, 177, 178, 
179]}], "trigger": {"text": "comparable to or better", "tokens": ["comparable", "to", "or", "better"], "offsets": [176, 177, 178, 179]}}, {"event_type": "RWF", "arguments": [{"text": "lacks of interpretability", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lacks", "of", "interpretability"], "offsets": [76, 77, 78]}, {"text": "reasoning on kg", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["reasoning", "on", "knowledge", "graph"], "offsets": [80, 81, 9, 10]}], "trigger": {"text": "resulting", "tokens": ["resulting"], "offsets": [74]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [89]}, {"text": "dihedral", "nugget_type": "APP", "argument_type": "Content", "tokens": ["dihedral"], "offsets": [95]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [90]}}], "document": ["link", "prediction", "is", "critical", "for", "the", "application", "of", "incomplete", "knowledge", "graph", "(", "kg", ")", "in", "the", "downstream", "tasks", ".", "as", "a", "family", "of", "effective", "approaches", "for", "link", "predictions", ",", "embedding", "methods", "try", "to", "learn", "low", "-", "rank", "representations", "for", "both", "entities", "and", "relations", "such", "that", "the", "bilinear", "form", "defined", "therein", "is", "a", "well", "-", "behaved", "scoring", "function", ".", "despite", "of", "their", "successful", "performances", ",", "existing", "bilinear", "forms", "overlook", "the", "modeling", "of", "relation", "compositions", ",", "resulting", "in", "lacks", "of", "interpretability", "for", "reasoning", "on", "kg", ".", "to", "fulfill", "this", "gap", ",", "we", "propose", "a", "new", "model", "called", "dihedral", ",", "named", "after", "dihedral", "symmetry", "group", ".", "this", "new", "model", "learns", "knowledge", "graph", "embeddings", "that", "can", "capture", "relation", "compositions", "by", "nature", ".", "furthermore", 
",", "our", "approach", "models", "the", "relation", "embeddings", "parametrized", "by", "discrete", "values", ",", "thereby", "decrease", "the", "solution", "space", "drastically", ".", "our", "experiments", "show", "that", "dihedral", "is", "able", "to", "capture", "all", "desired", "properties", "such", "as", "(", "skew", "-", ")", "symmetry", ",", "inversion", "and", "(", "non", "-", ")", "abelian", "composition", ",", "and", "outperforms", "existing", "bilinear", "form", "based", "approach", "and", "is", "comparable", "to", "or", "better", "than", "deep", "learning", "models", "such", "as", "conve", "."]}, {"venue": "ACL", "title": "Visual Story Post-Editing", "abstract": "We introduce the first dataset for human edits of machine-generated visual stories and explore how these collected edits may be used for the visual story post-editing task. The dataset ,VIST-Edit, includes 14,905 human-edited versions of 2,981 machine-generated visual stories. The stories were generated by two state-of-the-art visual storytelling models, each aligned to 5 human-edited versions. We establish baselines for the task, showing how a relatively small set of human edits can be leveraged to boost the performance of large visual storytelling models. 
We also discuss the weak correlation between automatic evaluation scores and human ratings, motivating the need for new automatic metrics.", "doc_id": "37e9e6a719f0d0c645e55ceb2acac304", "publication_year": 2019, "sentences": ["we introduce the first dataset for human edits of machine - generated visual stories and explore how these collected edits may be used for the visual story post - editing task .", "the dataset , vist - edit , includes 14 , 905 human - edited versions of 2 , 981 machine - generated visual stories .", "the stories were generated by two state - of - the - art visual storytelling models , each aligned to 5 human - edited versions .", "we establish baselines for the task , showing how a relatively small set of human edits can be leveraged to boost the performance of large visual storytelling models .", "we also discuss the weak correlation between automatic evaluation scores and human ratings , motivating the need for new automatic metrics ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "dataset for human edits of machine - generated visual stories", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset", "for", "human", "edits", "of", "machine", "-", "generated", "visual", "stories"], "offsets": [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [1]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "collected edits", "nugget_type": "APP", "argument_type": "Content", "tokens": ["collected", "edits"], "offsets": [18, 19]}, {"text": "visual story post - editing task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["visual", "story", "post", "-", "editing", "task"], "offsets": [25, 26, 27, 28, 29, 30]}], "trigger": {"text": "used", "tokens": ["used"], 
"offsets": [22]}}, {"event_type": "WKS", "arguments": [{"text": "vist - edit", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["vist", "-", "edit"], "offsets": [35, 36, 37]}, {"text": "14 , 905 human - edited versions of 2 , 981 machine - generated visual stories", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["14", ",", "905", "human", "-", "edited", "versions", "of", "2", ",", "981", "machine", "-", "generated", "visual", "stories"], "offsets": [40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]}], "trigger": {"text": "includes", "tokens": ["includes"], "offsets": [39]}}, {"event_type": "RWS", "arguments": [{"text": "stories", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["stories"], "offsets": [58]}, {"text": "two state - of - the - art visual storytelling models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["two", "state", "-", "of", "-", "the", "-", "art", "visual", "storytelling", "models"], "offsets": [62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72]}], "trigger": {"text": "generated", "tokens": ["generated"], "offsets": [60]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [83]}, {"text": "baselines", "nugget_type": "APP", "argument_type": "Content", "tokens": ["baselines"], "offsets": [85]}], "trigger": {"text": "establish", "tokens": ["establish"], "offsets": [84]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [83]}, {"text": "relatively small set of human edits", "nugget_type": "DST", "argument_type": "Content", "tokens": ["relatively", "small", "set", "of", "human", "edits"], "offsets": [93, 94, 95, 96, 97, 98]}, {"text": "boost", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["boost"], "offsets": [103]}], "trigger": {"text": "leveraged", "tokens": ["leveraged"], "offsets": [101]}}, 
{"event_type": "PUR", "arguments": [{"text": "performance of large visual storytelling models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance", "of", "large", "visual", "storytelling", "models"], "offsets": [105, 106, 107, 108, 109, 110]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [103]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [112]}, {"text": "weak correlation", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["weak", "correlation"], "offsets": [116, 117]}, {"text": "between automatic evaluation scores and human ratings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "automatic", "evaluation", "scores", "and", "human", "ratings"], "offsets": [118, 119, 120, 121, 122, 123, 124]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [114]}}], "document": ["we", "introduce", "the", "first", "dataset", "for", "human", "edits", "of", "machine", "-", "generated", "visual", "stories", "and", "explore", "how", "these", "collected", "edits", "may", "be", "used", "for", "the", "visual", "story", "post", "-", "editing", "task", ".", "the", "dataset", ",", "vist", "-", "edit", ",", "includes", "14", ",", "905", "human", "-", "edited", "versions", "of", "2", ",", "981", "machine", "-", "generated", "visual", "stories", ".", "the", "stories", "were", "generated", "by", "two", "state", "-", "of", "-", "the", "-", "art", "visual", "storytelling", "models", ",", "each", "aligned", "to", "5", "human", "-", "edited", "versions", ".", "we", "establish", "baselines", "for", "the", "task", ",", "showing", "how", "a", "relatively", "small", "set", "of", "human", "edits", "can", "be", "leveraged", "to", "boost", "the", "performance", "of", "large", "visual", "storytelling", "models", ".", "we", "also", "discuss", "the", "weak", "correlation", "between", "automatic", "evaluation", "scores", "and", 
"human", "ratings", ",", "motivating", "the", "need", "for", "new", "automatic", "metrics", "."]}, {"venue": "ACL", "title": "Personalizing Dialogue Agents via Meta-Learning", "abstract": "Existing personalized dialogue models use human designed persona descriptions to improve dialogue consistency. Collecting such descriptions from existing dialogues is expensive and requires hand-crafted feature designs. In this paper, we propose to extend Model-Agnostic Meta-Learning (MAML) (Finn et al., 2017) to personalized dialogue learning without using any persona descriptions. Our model learns to quickly adapt to new personas by leveraging only a few dialogue samples collected from the same user, which is fundamentally different from conditioning the response on the persona descriptions. Empirical results on Persona-chat dataset (Zhang et al., 2018) indicate that our solution outperforms non-meta-learning baselines using automatic evaluation metrics, and in terms of human-evaluated fluency and consistency.", "doc_id": "db9e1d698c8c67ff145e7127721fa64a", "publication_year": 2019, "sentences": ["existing personalized dialogue models use human designed persona descriptions to improve dialogue consistency .", "collecting such descriptions from existing dialogues is expensive and requires hand - crafted feature designs .", "in this paper , we propose to extend model - agnostic meta - learning ( maml ) ( finn et al . , 2017 ) to personalized dialogue learning without using any persona descriptions .", "our model learns to quickly adapt to new personas by leveraging only a few dialogue samples collected from the same user , which is fundamentally different from conditioning the response on the persona descriptions .", "empirical results on persona - chat dataset ( zhang et al . 
, 2018 ) indicate that our solution outperforms non - meta - learning baselines using automatic evaluation metrics , and in terms of human - evaluated fluency and consistency ."], "events": [{"event_type": "RWS", "arguments": [{"text": "existing personalized dialogue models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "personalized", "dialogue", "models"], "offsets": [0, 1, 2, 3]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [10]}, {"text": "human designed persona descriptions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["human", "designed", "persona", "descriptions"], "offsets": [5, 6, 7, 8]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [4]}}, {"event_type": "PUR", "arguments": [{"text": "dialogue consistency", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["dialogue", "consistency"], "offsets": [11, 12]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "collecting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["collecting"], "offsets": [14]}], "trigger": {"text": "expensive", "tokens": ["expensive"], "offsets": [21]}}, {"event_type": "RWF", "arguments": [{"text": "hand - crafted feature designs", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["hand", "-", "crafted", "feature", "designs"], "offsets": [24, 25, 26, 27, 28]}, {"text": "collecting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["collecting"], "offsets": [14]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [23]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [34]}, {"text": "model - agnostic meta - learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model", "-", "agnostic", "meta", "-", "learning"], "offsets": [38, 39, 40, 
41, 42, 43]}, {"text": "without using any persona descriptions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "using", "any", "persona", "descriptions"], "offsets": [59, 60, 61, 62, 63]}, {"text": "personalized dialogue learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["personalized", "dialogue", "learning"], "offsets": [56, 57, 58]}], "trigger": {"text": "extend", "tokens": ["extend"], "offsets": [37]}}, {"event_type": "MDS", "arguments": [{"text": "dialogue samples", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["dialogue", "samples"], "offsets": [79, 80]}, {"text": "quickly adapt", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["quickly", "adapt"], "offsets": [69, 70]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [75]}}, {"event_type": "PUR", "arguments": [{"text": "new personas", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["new", "personas"], "offsets": [72, 73]}], "trigger": {"text": "quickly adapt", "tokens": ["quickly", "adapt"], "offsets": [69, 70]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [119]}], "trigger": {"text": "indicate", "tokens": ["indicate"], "offsets": [115]}}, {"event_type": "CMP", "arguments": [{"text": "solution", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["solution"], "offsets": [118]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [119]}, {"text": "non - meta - learning baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["non", "-", "meta", "-", "learning", "baselines"], "offsets": [120, 121, 122, 123, 124, 125]}, {"text": "automatic evaluation metrics", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["automatic", "evaluation", "metrics"], "offsets": [127, 128, 129]}, {"text": "human - evaluated 
fluency", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["human", "-", "evaluated", "fluency"], "offsets": [135, 136, 137, 138]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [119]}}, {"event_type": "PUR", "arguments": [{"text": "such descriptions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["human", "designed", "persona", "descriptions"], "offsets": [5, 6, 7, 8]}, {"text": "from existing dialogues", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "existing", "dialogues"], "offsets": [17, 18, 19]}], "trigger": {"text": "collecting", "tokens": ["collecting"], "offsets": [14]}}], "document": ["existing", "personalized", "dialogue", "models", "use", "human", "designed", "persona", "descriptions", "to", "improve", "dialogue", "consistency", ".", "collecting", "such", "descriptions", "from", "existing", "dialogues", "is", "expensive", "and", "requires", "hand", "-", "crafted", "feature", "designs", ".", "in", "this", "paper", ",", "we", "propose", "to", "extend", "model", "-", "agnostic", "meta", "-", "learning", "(", "maml", ")", "(", "finn", "et", "al", ".", ",", "2017", ")", "to", "personalized", "dialogue", "learning", "without", "using", "any", "persona", "descriptions", ".", "our", "model", "learns", "to", "quickly", "adapt", "to", "new", "personas", "by", "leveraging", "only", "a", "few", "dialogue", "samples", "collected", "from", "the", "same", "user", ",", "which", "is", "fundamentally", "different", "from", "conditioning", "the", "response", "on", "the", "persona", "descriptions", ".", "empirical", "results", "on", "persona", "-", "chat", "dataset", "(", "zhang", "et", "al", ".", ",", "2018", ")", "indicate", "that", "our", "solution", "outperforms", "non", "-", "meta", "-", "learning", "baselines", "using", "automatic", "evaluation", "metrics", ",", "and", "in", "terms", "of", "human", "-", "evaluated", "fluency", "and", "consistency", "."]}, {"venue": "ACL", "title": "Enhancing 
Cross-target Stance Detection with Transferable Semantic-Emotion Knowledge", "abstract": "Stance detection is an important task, which aims to classify the attitude of an opinionated text towards a given target. Remarkable success has been achieved when sufficient labeled training data is available. However, annotating sufficient data is labor-intensive, which establishes significant barriers for generalizing the stance classifier to the data with new targets. In this paper, we proposed a Semantic-Emotion Knowledge Transferring (SEKT) model for cross-target stance detection, which uses the external knowledge (semantic and emotion lexicons) as a bridge to enable knowledge transfer across different targets. Specifically, a semantic-emotion heterogeneous graph is constructed from external semantic and emotion lexicons, which is then fed into a graph convolutional network to learn multi-hop semantic connections between words and emotion tags. Then, the learned semantic-emotion graph representation, which serves as prior knowledge bridging the gap between the source and target domains, is fully integrated into the bidirectional long short-term memory (BiLSTM) stance classifier by adding a novel knowledge-aware memory unit to the BiLSTM cell. 
Extensive experiments on a large real-world dataset demonstrate the superiority of SEKT against the state-of-the-art baseline methods.", "doc_id": "7152a8595e42876e4e4c9db97fb4b29e", "publication_year": 2020, "sentences": ["stance detection is an important task , which aims to classify the attitude of an opinionated text towards a given target .", "remarkable success has been achieved when sufficient labeled training data is available .", "however , annotating sufficient data is labor - intensive , which establishes significant barriers for generalizing the stance classifier to the data with new targets .", "in this paper , we proposed a semantic - emotion knowledge transferring ( sekt ) model for cross - target stance detection , which uses the external knowledge ( semantic and emotion lexicons ) as a bridge to enable knowledge transfer across different targets .", "specifically , a semantic - emotion heterogeneous graph is constructed from external semantic and emotion lexicons , which is then fed into a graph convolutional network to learn multi - hop semantic connections between words and emotion tags .", "then , the learned semantic - emotion graph representation , which serves as prior knowledge bridging the gap between the source and target domains , is fully integrated into the bidirectional long short - term memory ( bilstm ) stance classifier by adding a novel knowledge - aware memory unit to the bilstm cell .", "extensive experiments on a large real - world dataset demonstrate the superiority of sekt against the state - of - the - art baseline methods ."], "events": [{"event_type": "ITT", "arguments": [{"text": "stance detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["stance", "detection"], "offsets": [0, 1]}], "trigger": {"text": "classify", "tokens": ["classify"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "annotating sufficient data", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["annotating", 
"sufficient", "data"], "offsets": [37, 38, 39]}], "trigger": {"text": "labor - intensive", "tokens": ["labor", "-", "intensive"], "offsets": [41, 42, 43]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [65]}, {"text": "cross - target stance detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["cross", "-", "target", "stance", "detection"], "offsets": [78, 79, 80, 81, 82]}, {"text": "semantic - emotion knowledge transferring ( sekt ) model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["semantic", "-", "emotion", "knowledge", "transferring", "model"], "offsets": [68, 69, 70, 71, 72, 76]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [66]}}, {"event_type": "MDS", "arguments": [{"text": "graph convolutional network", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["graph", "convolutional", "network"], "offsets": [129, 130, 131]}, {"text": "semantic - emotion heterogeneous graph", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["semantic", "-", "emotion", "heterogeneous", "graph"], "offsets": [109, 110, 111, 112, 113]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [133]}], "trigger": {"text": "fed", "tokens": ["fed"], "offsets": [126]}}, {"event_type": "PUR", "arguments": [{"text": "multi - hop semantic connections between words and emotion tags", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["multi", "-", "hop", "semantic", "connections", "between", "words", "and", "emotion", "tags"], "offsets": [134, 135, 136, 137, 138, 139, 140, 141, 142, 143]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [133]}}, {"event_type": "MDS", "arguments": [{"text": "knowledge - aware memory unit", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["knowledge", "-", "aware", "memory", "unit"], "offsets": [190, 
191, 192, 193, 194]}, {"text": "bilstm cell", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["bidirectional", "long", "short", "-", "term", "memory", "cell"], "offsets": [175, 176, 177, 178, 179, 180, 198]}], "trigger": {"text": "adding", "tokens": ["adding"], "offsets": [187]}}, {"event_type": "MDS", "arguments": [{"text": "bidirectional long short - term memory ( bilstm ) stance classifier", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["bidirectional", "long", "short", "-", "term", "memory", "stance", "classifier"], "offsets": [175, 176, 177, 178, 179, 180, 184, 185]}, {"text": "learned semantic - emotion graph representation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["learned", "semantic", "-", "emotion", "graph", "representation"], "offsets": [148, 149, 150, 151, 152, 153]}], "trigger": {"text": "integrated", "tokens": ["integrated"], "offsets": [172]}}, {"event_type": "FIN", "arguments": [{"text": "against", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["against"], "offsets": [214]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [209]}}, {"event_type": "CMP", "arguments": [{"text": "semantic - emotion knowledge transferring", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["semantic", "-", "emotion", "knowledge", "transferring"], "offsets": [68, 69, 70, 71, 72]}, {"text": "state - of - the - art baseline methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "baseline", "methods"], "offsets": [216, 217, 218, 219, 220, 221, 222, 223, 224]}, {"text": "real - world dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["real", "-", "world", "dataset"], "offsets": [205, 206, 207, 208]}, {"text": "superiority", "nugget_type": "STR", "argument_type": "Result", "tokens": ["superiority"], "offsets": [211]}], "trigger": {"text": "against", "tokens": ["against"], "offsets": 
[214]}}, {"event_type": "RWF", "arguments": [{"text": "annotating sufficient data", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["annotating", "sufficient", "data"], "offsets": [37, 38, 39]}, {"text": "significant barriers", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["significant", "barriers"], "offsets": [47, 48]}, {"text": "generalizing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generalizing"], "offsets": [50]}], "trigger": {"text": "establishes", "tokens": ["establishes"], "offsets": [46]}}, {"event_type": "PUR", "arguments": [{"text": "stance classifier", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["stance", "classifier"], "offsets": [52, 53]}, {"text": "to the data with new targets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "the", "data", "with", "new", "targets"], "offsets": [54, 55, 56, 57, 58, 59]}], "trigger": {"text": "generalizing", "tokens": ["generalizing"], "offsets": [50]}}, {"event_type": "MDS", "arguments": [{"text": "external knowledge", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["external", "knowledge"], "offsets": [87, 88]}, {"text": "bridge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["bridge"], "offsets": [97]}, {"text": "enable", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enable"], "offsets": [99]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [85]}}, {"event_type": "PUR", "arguments": [{"text": "knowledge transfer across different targets", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["knowledge", "transfer", "across", "different", "targets"], "offsets": [100, 101, 102, 103, 104]}], "trigger": {"text": "enable", "tokens": ["enable"], "offsets": [99]}}, {"event_type": "MDS", "arguments": [{"text": "semantic - emotion heterogeneous graph", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["semantic", "-", "emotion", "heterogeneous", 
"graph"], "offsets": [109, 110, 111, 112, 113]}, {"text": "external semantic", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["external", "semantic"], "offsets": [117, 118]}, {"text": "emotion lexicons", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["emotion", "lexicons"], "offsets": [120, 121]}], "trigger": {"text": "constructed", "tokens": ["constructed"], "offsets": [115]}}], "document": ["stance", "detection", "is", "an", "important", "task", ",", "which", "aims", "to", "classify", "the", "attitude", "of", "an", "opinionated", "text", "towards", "a", "given", "target", ".", "remarkable", "success", "has", "been", "achieved", "when", "sufficient", "labeled", "training", "data", "is", "available", ".", "however", ",", "annotating", "sufficient", "data", "is", "labor", "-", "intensive", ",", "which", "establishes", "significant", "barriers", "for", "generalizing", "the", "stance", "classifier", "to", "the", "data", "with", "new", "targets", ".", "in", "this", "paper", ",", "we", "proposed", "a", "semantic", "-", "emotion", "knowledge", "transferring", "(", "sekt", ")", "model", "for", "cross", "-", "target", "stance", "detection", ",", "which", "uses", "the", "external", "knowledge", "(", "semantic", "and", "emotion", "lexicons", ")", "as", "a", "bridge", "to", "enable", "knowledge", "transfer", "across", "different", "targets", ".", "specifically", ",", "a", "semantic", "-", "emotion", "heterogeneous", "graph", "is", "constructed", "from", "external", "semantic", "and", "emotion", "lexicons", ",", "which", "is", "then", "fed", "into", "a", "graph", "convolutional", "network", "to", "learn", "multi", "-", "hop", "semantic", "connections", "between", "words", "and", "emotion", "tags", ".", "then", ",", "the", "learned", "semantic", "-", "emotion", "graph", "representation", ",", "which", "serves", "as", "prior", "knowledge", "bridging", "the", "gap", "between", "the", "source", "and", "target", "domains", ",", "is", "fully", 
"integrated", "into", "the", "bidirectional", "long", "short", "-", "term", "memory", "(", "bilstm", ")", "stance", "classifier", "by", "adding", "a", "novel", "knowledge", "-", "aware", "memory", "unit", "to", "the", "bilstm", "cell", ".", "extensive", "experiments", "on", "a", "large", "real", "-", "world", "dataset", "demonstrate", "the", "superiority", "of", "sekt", "against", "the", "state", "-", "of", "-", "the", "-", "art", "baseline", "methods", "."]}, {"venue": "ACL", "title": "Learning the Beauty in Songs: Neural Singing Voice Beautifier", "abstract": "We are interested in a novel task, singing voice beautification (SVB). Given the singing voice of an amateur singer, SVB aims to improve the intonation and vocal tone of the voice, while keeping the content and vocal timbre. Current automatic pitch correction techniques are immature, and most of them are restricted to intonation but ignore the overall aesthetic quality. Hence, we introduce Neural Singing Voice Beautifier (NSVB), the first generative model to solve the SVB task, which adopts a conditional variational autoencoder as the backbone and learns the latent representations of vocal tone. In NSVB, we propose a novel time-warping approach for pitch correction: Shape-Aware Dynamic Time Warping (SADTW), which ameliorates the robustness of existing time-warping approaches, to synchronize the amateur recording with the template pitch curve. Furthermore, we propose a latent-mapping algorithm in the latent space to convert the amateur vocal tone to the professional one. To achieve this, we also propose a new dataset containing parallel singing recordings of both amateur and professional versions. Extensive experiments on both Chinese and English songs demonstrate the effectiveness of our methods in terms of both objective and subjective metrics. Audio samples are available at https://neuralsvb.github.io. 
Codes: https://github.com/MoonInTheRiver/NeuralSVB.", "doc_id": "24a489737fa07718653a94109f160a45", "publication_year": 2022, "sentences": ["we are interested in a novel task , singing voice beautification ( svb ) .", "given the singing voice of an amateur singer , svb aims to improve the intonation and vocal tone of the voice , while keeping the content and vocal timbre .", "current automatic pitch correction techniques are immature , and most of them are restricted to intonation but ignore the overall aesthetic quality .", "hence , we introduce neural singing voice beautifier ( nsvb ) , the first generative model to solve the svb task , which adopts a conditional variational autoencoder as the backbone and learns the latent representations of vocal tone .", "in nsvb , we propose a novel time - warping approach for pitch correction : shape - aware dynamic time warping ( sadtw ) , which ameliorates the robustness of existing time - warping approaches , to synchronize the amateur recording with the template pitch curve .", "furthermore , we propose a latent - mapping algorithm in the latent space to convert the amateur vocal tone to the professional one .", "to achieve this , we also propose a new dataset containing parallel singing recordings of both amateur and professional versions .", "extensive experiments on both chinese and english songs demonstrate the effectiveness of our methods in terms of both objective and subjective metrics .", "audio samples are available at https : / / neuralsvb . github . io .", "codes : https : / / github . 
com / moonintheriver / neuralsvb ."], "events": [{"event_type": "ITT", "arguments": [{"text": "singing voice beautification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["singing", "voice", "beautification"], "offsets": [8, 9, 10]}], "trigger": {"text": "interested", "tokens": ["interested"], "offsets": [2]}}, {"event_type": "RWF", "arguments": [{"text": "current automatic pitch correction techniques", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["current", "automatic", "pitch", "correction", "techniques"], "offsets": [45, 46, 47, 48, 49]}, {"text": "overall aesthetic quality", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["overall", "aesthetic", "quality"], "offsets": [64, 65, 66]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [62]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [70]}, {"text": "neural singing voice beautifier", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "singing", "voice", "beautifier"], "offsets": [72, 73, 74, 75]}, {"text": "solve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["solve"], "offsets": [85]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [71]}}, {"event_type": "PUR", "arguments": [{"text": "singing voice beautification", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["singing", "voice", "beautification"], "offsets": [8, 9, 10]}], "trigger": {"text": "solve", "tokens": ["solve"], "offsets": [85]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [111]}, {"text": "pitch correction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["pitch", "correction"], "offsets": [120, 121]}, {"text": "shape - aware dynamic time warping", "nugget_type": "APP", "argument_type": "Content", "tokens": ["shape", "-", "aware", "dynamic", 
"time", "warping"], "offsets": [123, 124, 125, 126, 127, 128]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [112]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [157]}, {"text": "latent - mapping algorithm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["latent", "-", "mapping", "algorithm"], "offsets": [160, 161, 162, 163]}, {"text": "convert", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["convert"], "offsets": [169]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [158]}}, {"event_type": "PUR", "arguments": [{"text": "amateur vocal tone", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["amateur", "vocal", "tone"], "offsets": [171, 172, 173]}, {"text": "professional one", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["professional", "one"], "offsets": [176, 177]}], "trigger": {"text": "convert", "tokens": ["convert"], "offsets": [169]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [183]}, {"text": "dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset"], "offsets": [188]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [185]}}, {"event_type": "FAC", "arguments": [{"text": "neural singing voice beautifier", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["neural", "singing", "voice", "beautifier"], "offsets": [72, 73, 74, 75]}, {"text": "in terms of both objective and subjective metrics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "terms", "of", "both", "objective", "and", "subjective", "metrics"], "offsets": [214, 215, 216, 217, 218, 219, 220, 221]}, {"text": "effectiveness of our methods", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness", "of", "our", "methods"], "offsets": [210, 211, 212, 
213]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [208]}}, {"event_type": "WKS", "arguments": [{"text": "conditional variational autoencoder", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["conditional", "variational", "autoencoder"], "offsets": [93, 94, 95]}, {"text": "as the backbone", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "the", "backbone"], "offsets": [96, 97, 98]}], "trigger": {"text": "adopts", "tokens": ["adopts"], "offsets": [91]}}, {"event_type": "WKS", "arguments": [{"text": "latent representations of vocal tone", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["latent", "representations", "of", "vocal", "tone"], "offsets": [102, 103, 104, 105, 106]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [100]}}, {"event_type": "PUR", "arguments": [{"text": "amateur recording", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["amateur", "recording"], "offsets": [147, 148]}, {"text": "template pitch curve", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["template", "pitch", "curve"], "offsets": [151, 152, 153]}], "trigger": {"text": "synchronize", "tokens": ["synchronize"], "offsets": [145]}}, {"event_type": "WKS", "arguments": [{"text": "robustness of existing time - warping approaches", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["robustness", "of", "existing", "time", "-", "warping", "approaches"], "offsets": [136, 137, 138, 139, 140, 141, 142]}, {"text": "synchronize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["synchronize"], "offsets": [145]}], "trigger": {"text": "ameliorates", "tokens": ["ameliorates"], "offsets": [134]}}], "document": ["we", "are", "interested", "in", "a", "novel", "task", ",", "singing", "voice", "beautification", "(", "svb", ")", ".", "given", "the", "singing", "voice", "of", "an", "amateur", "singer", ",", "svb", "aims", "to", "improve", "the", "intonation", "and", "vocal", 
"tone", "of", "the", "voice", ",", "while", "keeping", "the", "content", "and", "vocal", "timbre", ".", "current", "automatic", "pitch", "correction", "techniques", "are", "immature", ",", "and", "most", "of", "them", "are", "restricted", "to", "intonation", "but", "ignore", "the", "overall", "aesthetic", "quality", ".", "hence", ",", "we", "introduce", "neural", "singing", "voice", "beautifier", "(", "nsvb", ")", ",", "the", "first", "generative", "model", "to", "solve", "the", "svb", "task", ",", "which", "adopts", "a", "conditional", "variational", "autoencoder", "as", "the", "backbone", "and", "learns", "the", "latent", "representations", "of", "vocal", "tone", ".", "in", "nsvb", ",", "we", "propose", "a", "novel", "time", "-", "warping", "approach", "for", "pitch", "correction", ":", "shape", "-", "aware", "dynamic", "time", "warping", "(", "sadtw", ")", ",", "which", "ameliorates", "the", "robustness", "of", "existing", "time", "-", "warping", "approaches", ",", "to", "synchronize", "the", "amateur", "recording", "with", "the", "template", "pitch", "curve", ".", "furthermore", ",", "we", "propose", "a", "latent", "-", "mapping", "algorithm", "in", "the", "latent", "space", "to", "convert", "the", "amateur", "vocal", "tone", "to", "the", "professional", "one", ".", "to", "achieve", "this", ",", "we", "also", "propose", "a", "new", "dataset", "containing", "parallel", "singing", "recordings", "of", "both", "amateur", "and", "professional", "versions", ".", "extensive", "experiments", "on", "both", "chinese", "and", "english", "songs", "demonstrate", "the", "effectiveness", "of", "our", "methods", "in", "terms", "of", "both", "objective", "and", "subjective", "metrics", ".", "audio", "samples", "are", "available", "at", "https", ":", "/", "/", "neuralsvb", ".", "github", ".", "io", ".", "codes", ":", "https", ":", "/", "/", "github", ".", "com", "/", "moonintheriver", "/", "neuralsvb", "."]}, {"venue": "ACL", "title": "Exploiting Language Model Prompts Using 
Similarity Measures: A Case Study on the Word-in-Context Task", "abstract": "As a recent development in few-shot learning, prompt-based techniques have demonstrated promising potential in a variety of natural language processing tasks. However, despite proving competitive on most tasks in the GLUE and SuperGLUE benchmarks, existing prompt-based techniques fail on the semantic distinction task of the Word-in-Context (WiC) dataset. Specifically, none of the existing few-shot approaches (including the in-context learning of GPT-3) can attain a performance that is meaningfully different from the random baseline.Trying to fill this gap, we propose a new prompting technique, based on similarity metrics, which boosts few-shot performance to the level of fully supervised methods. Our simple adaptation shows that the failure of existing prompt-based techniques in semantic distinction is due to their improper configuration, rather than lack of relevant knowledge in the representations. We also show that this approach can be effectively extended to other downstream tasks for which a single prompt is sufficient.", "doc_id": "6662db1cdea1b346e6b64ac1e8e40ce5", "publication_year": 2022, "sentences": ["as a recent development in few - shot learning , prompt - based techniques have demonstrated promising potential in a variety of natural language processing tasks .", "however , despite proving competitive on most tasks in the glue and superglue benchmarks , existing prompt - based techniques fail on the semantic distinction task of the word - in - context ( wic ) dataset .", "specifically , none of the existing few - shot approaches ( including the in - context learning of gpt - 3 ) can attain a performance that is meaningfully different from the random baseline .", "trying to fill this gap , we propose a new prompting technique , based on similarity metrics , which boosts few - shot performance to the level of fully supervised methods .", "our simple adaptation shows that the 
failure of existing prompt - based techniques in semantic distinction is due to their improper configuration , rather than lack of relevant knowledge in the representations .", "we also show that this approach can be effectively extended to other downstream tasks for which a single prompt is sufficient ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language processing tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "processing", "tasks"], "offsets": [22, 23, 24, 25]}], "trigger": {"text": "demonstrated", "tokens": ["demonstrated"], "offsets": [15]}}, {"event_type": "RWF", "arguments": [{"text": "existing prompt - based techniques", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "prompt", "-", "based", "techniques"], "offsets": [42, 43, 44, 45, 46]}, {"text": "fail", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["fail"], "offsets": [47]}], "trigger": {"text": "fail", "tokens": ["fail"], "offsets": [47]}}, {"event_type": "RWF", "arguments": [{"text": "none of the existing few - shot approaches", "nugget_type": "WEA", "argument_type": "Concern", "tokens": ["none", "of", "the", "existing", "few", "-", "shot", "approaches"], "offsets": [67, 68, 69, 70, 71, 72, 73, 74]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance"], "offsets": [90]}], "trigger": {"text": "attain", "tokens": ["attain"], "offsets": [88]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [106]}, {"text": "prompting technique", "nugget_type": "APP", "argument_type": "Content", "tokens": ["prompting", "technique"], "offsets": [110, 111]}, {"text": "boosts", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["boosts"], "offsets": [119]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [107]}}, {"event_type": "FIN", "arguments": [{"text": "due", 
"nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["due"], "offsets": [149]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [135]}}, {"event_type": "FAC", "arguments": [{"text": "failure of existing prompt - based techniques", "nugget_type": "WEA", "argument_type": "Subject", "tokens": ["failure", "of", "existing", "prompt", "-", "based", "techniques"], "offsets": [138, 139, 140, 141, 142, 143, 144]}, {"text": "semantic distinction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["semantic", "distinction"], "offsets": [146, 147]}, {"text": "improper configuration", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["improper", "configuration"], "offsets": [152, 153]}, {"text": "rather than lack of relevant knowledge in the representations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["rather", "than", "lack", "of", "relevant", "knowledge", "in", "the", "representations"], "offsets": [155, 156, 157, 158, 159, 160, 161, 162, 163]}], "trigger": {"text": "due", "tokens": ["due"], "offsets": [149]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [165]}, {"text": "effectively extended", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effectively", "extended"], "offsets": [173, 174]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [167]}}, {"event_type": "FAC", "arguments": [{"text": "prompting technique", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["prompting", "technique"], "offsets": [110, 111]}, {"text": "downstream tasks", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["downstream", "tasks"], "offsets": [177, 178]}, {"text": "for which a single prompt is sufficient", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "which", "a", "single", "prompt", "is", "sufficient"], "offsets": [179, 180, 181, 182, 183, 184, 185]}], "trigger": {"text": 
"effectively extended", "tokens": ["effectively", "extended"], "offsets": [173, 174]}}, {"event_type": "PUR", "arguments": [{"text": "few - shot performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["few", "-", "shot", "performance"], "offsets": [120, 121, 122, 123]}, {"text": "to the level of fully supervised methods", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "the", "level", "of", "fully", "supervised", "methods"], "offsets": [124, 125, 126, 127, 128, 129, 130]}], "trigger": {"text": "boosts", "tokens": ["boosts"], "offsets": [119]}}], "document": ["as", "a", "recent", "development", "in", "few", "-", "shot", "learning", ",", "prompt", "-", "based", "techniques", "have", "demonstrated", "promising", "potential", "in", "a", "variety", "of", "natural", "language", "processing", "tasks", ".", "however", ",", "despite", "proving", "competitive", "on", "most", "tasks", "in", "the", "glue", "and", "superglue", "benchmarks", ",", "existing", "prompt", "-", "based", "techniques", "fail", "on", "the", "semantic", "distinction", "task", "of", "the", "word", "-", "in", "-", "context", "(", "wic", ")", "dataset", ".", "specifically", ",", "none", "of", "the", "existing", "few", "-", "shot", "approaches", "(", "including", "the", "in", "-", "context", "learning", "of", "gpt", "-", "3", ")", "can", "attain", "a", "performance", "that", "is", "meaningfully", "different", "from", "the", "random", "baseline", ".", "trying", "to", "fill", "this", "gap", ",", "we", "propose", "a", "new", "prompting", "technique", ",", "based", "on", "similarity", "metrics", ",", "which", "boosts", "few", "-", "shot", "performance", "to", "the", "level", "of", "fully", "supervised", "methods", ".", "our", "simple", "adaptation", "shows", "that", "the", "failure", "of", "existing", "prompt", "-", "based", "techniques", "in", "semantic", "distinction", "is", "due", "to", "their", "improper", "configuration", ",", "rather", "than", "lack", "of", "relevant", 
"knowledge", "in", "the", "representations", ".", "we", "also", "show", "that", "this", "approach", "can", "be", "effectively", "extended", "to", "other", "downstream", "tasks", "for", "which", "a", "single", "prompt", "is", "sufficient", "."]}, {"venue": "ACL", "title": "FaVIQ: FAct Verification from Information-seeking Questions", "abstract": "Despite significant interest in developing general purpose fact checking models, it is challenging to construct a large-scale fact verification dataset with realistic real-world claims. Existing claims are either authored by crowdworkers, thereby introducing subtle biases thatare difficult to control for, or manually verified by professional fact checkers, causing them to be expensive and limited in scale. In this paper, we construct a large-scale challenging fact verification dataset called FAVIQ, consisting of 188k claims derived from an existing corpus of ambiguous information-seeking questions. The ambiguities in the questions enable automatically constructing true and false claims that reflect user confusions (e.g., the year of the movie being filmed vs. being released). Claims in FAVIQ are verified to be natural, contain little lexical bias, and require a complete understanding of the evidence for verification. Our experiments show that the state-of-the-art models are far from solving our new task. Moreover, training on our data helps in professional fact-checking, outperforming models trained on the widely used dataset FEVER or in-domain data by up to 17% absolute. 
Altogether, our data will serve as a challenging benchmark for natural language understanding and support future progress in professional fact checking.", "doc_id": "a9502661987ff9decb135c877b556e4c", "publication_year": 2022, "sentences": ["despite significant interest in developing general purpose fact checking models , it is challenging to construct a large - scale fact verification dataset with realistic real - world claims .", "existing claims are either authored by crowdworkers , thereby introducing subtle biases thatare difficult to control for , or manually verified by professional fact checkers , causing them to be expensive and limited in scale .", "in this paper , we construct a large - scale challenging fact verification dataset called faviq , consisting of 188k claims derived from an existing corpus of ambiguous information - seeking questions .", "the ambiguities in the questions enable automatically constructing true and false claims that reflect user confusions ( e . g . , the year of the movie being filmed vs . 
being released ) .", "claims in faviq are verified to be natural , contain little lexical bias , and require a complete understanding of the evidence for verification .", "our experiments show that the state - of - the - art models are far from solving our new task .", "moreover , training on our data helps in professional fact - checking , outperforming models trained on the widely used dataset fever or in - domain data by up to 17 % absolute .", "altogether , our data will serve as a challenging benchmark for natural language understanding and support future progress in professional fact checking ."], "events": [{"event_type": "ITT", "arguments": [{"text": "general purpose fact checking models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["general", "purpose", "fact", "checking", "models"], "offsets": [5, 6, 7, 8, 9]}], "trigger": {"text": "developing", "tokens": ["developing"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "existing claims", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["existing", "claims"], "offsets": [30, 31]}, {"text": "subtle biases", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["subtle", "biases"], "offsets": [40, 41]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [39]}}, {"event_type": "RWF", "arguments": [{"text": "existing claims", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["existing", "claims"], "offsets": [30, 31]}, {"text": "subtle biases", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["subtle", "biases"], "offsets": [40, 41]}, {"text": "by crowdworkers", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "crowdworkers"], "offsets": [35, 36]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [39]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [70]}, {"text": "large - scale challenging 
fact verification dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["large", "-", "scale", "challenging", "fact", "verification", "dataset"], "offsets": [73, 74, 75, 76, 77, 78, 79]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [71]}}, {"event_type": "FIN", "arguments": [{"text": "far from solving", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["far", "from", "solving"], "offsets": [173, 174, 175]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [161]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [164, 165, 166, 167, 168, 169, 170, 171]}], "trigger": {"text": "far from solving", "tokens": ["far", "from", "solving"], "offsets": [173, 174, 175]}}, {"event_type": "CMP", "arguments": [{"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [193]}, {"text": "models trained on the widely used dataset fever or in - domain data", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["models", "trained", "on", "the", "widely", "used", "dataset", "fever", "or", "in", "-", "domain", "data"], "offsets": [194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206]}, {"text": "absolute", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["absolute"], "offsets": [212]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [193]}}, {"event_type": "RWF", "arguments": [{"text": "existing claims", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "claims"], "offsets": [30, 31]}, {"text": "expensive", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["expensive"], "offsets": [60]}, {"text": "limited in scale", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limited", "in", "scale"], "offsets": [62, 63, 
64]}], "trigger": {"text": "expensive", "tokens": ["expensive"], "offsets": [60]}}, {"event_type": "MDS", "arguments": [{"text": "ambiguities in the questions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["ambiguities", "in", "the", "questions"], "offsets": [100, 101, 102, 103]}, {"text": "true and false claims", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["true", "and", "false", "claims"], "offsets": [107, 108, 109, 110]}], "trigger": {"text": "automatically constructing", "tokens": ["automatically", "constructing"], "offsets": [105, 106]}}, {"event_type": "WKS", "arguments": [{"text": "large - scale challenging fact verification dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["large", "-", "scale", "challenging", "fact", "verification", "dataset"], "offsets": [73, 74, 75, 76, 77, 78, 79]}, {"text": "helps", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["helps"], "offsets": [186]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [182]}}, {"event_type": "PUR", "arguments": [{"text": "professional fact - checking", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["professional", "fact", "-", "checking"], "offsets": [188, 189, 190, 191]}], "trigger": {"text": "helps", "tokens": ["helps"], "offsets": [186]}}], "document": ["despite", "significant", "interest", "in", "developing", "general", "purpose", "fact", "checking", "models", ",", "it", "is", "challenging", "to", "construct", "a", "large", "-", "scale", "fact", "verification", "dataset", "with", "realistic", "real", "-", "world", "claims", ".", "existing", "claims", "are", "either", "authored", "by", "crowdworkers", ",", "thereby", "introducing", "subtle", "biases", "thatare", "difficult", "to", "control", "for", ",", "or", "manually", "verified", "by", "professional", "fact", "checkers", ",", "causing", "them", "to", "be", "expensive", "and", "limited", "in", "scale", ".", "in", "this", "paper", ",", 
"we", "construct", "a", "large", "-", "scale", "challenging", "fact", "verification", "dataset", "called", "faviq", ",", "consisting", "of", "188k", "claims", "derived", "from", "an", "existing", "corpus", "of", "ambiguous", "information", "-", "seeking", "questions", ".", "the", "ambiguities", "in", "the", "questions", "enable", "automatically", "constructing", "true", "and", "false", "claims", "that", "reflect", "user", "confusions", "(", "e", ".", "g", ".", ",", "the", "year", "of", "the", "movie", "being", "filmed", "vs", ".", "being", "released", ")", ".", "claims", "in", "faviq", "are", "verified", "to", "be", "natural", ",", "contain", "little", "lexical", "bias", ",", "and", "require", "a", "complete", "understanding", "of", "the", "evidence", "for", "verification", ".", "our", "experiments", "show", "that", "the", "state", "-", "of", "-", "the", "-", "art", "models", "are", "far", "from", "solving", "our", "new", "task", ".", "moreover", ",", "training", "on", "our", "data", "helps", "in", "professional", "fact", "-", "checking", ",", "outperforming", "models", "trained", "on", "the", "widely", "used", "dataset", "fever", "or", "in", "-", "domain", "data", "by", "up", "to", "17", "%", "absolute", ".", "altogether", ",", "our", "data", "will", "serve", "as", "a", "challenging", "benchmark", "for", "natural", "language", "understanding", "and", "support", "future", "progress", "in", "professional", "fact", "checking", "."]}, {"venue": "ACL", "title": "Visually Grounded Neural Syntax Acquisition", "abstract": "We present the Visually Grounded Neural Syntax Learner (VG-NSL), an approach for learning syntactic representations and structures without any explicit supervision. The model learns by looking at natural images and reading paired captions. VG-NSL generates constituency parse trees of texts, recursively composes representations for constituents, and matches them with images. 
We define concreteness of constituents by their matching scores with images, and use it to guide the parsing of text. Experiments on the MSCOCO data set show that VG-NSL outperforms various unsupervised parsing approaches that do not use visual grounding, in terms of F1 scores against gold parse trees. We find that VGNSL is much more stable with respect to the choice of random initialization and the amount of training data. We also find that the concreteness acquired by VG-NSL correlates well with a similar measure defined by linguists. Finally, we also apply VG-NSL to multiple languages in the Multi30K data set, showing that our model consistently outperforms prior unsupervised approaches.", "doc_id": "1b4b1943647aefd3e12287ecc34f5f85", "publication_year": 2019, "sentences": ["we present the visually grounded neural syntax learner ( vg - nsl ) , an approach for learning syntactic representations and structures without any explicit supervision .", "the model learns by looking at natural images and reading paired captions .", "vg - nsl generates constituency parse trees of texts , recursively composes representations for constituents , and matches them with images .", "we define concreteness of constituents by their matching scores with images , and use it to guide the parsing of text .", "experiments on the mscoco data set show that vg - nsl outperforms various unsupervised parsing approaches that do not use visual grounding , in terms of f1 scores against gold parse trees .", "we find that vgnsl is much more stable with respect to the choice of random initialization and the amount of training data .", "we also find that the concreteness acquired by vg - nsl correlates well with a similar measure defined by linguists .", "finally , we also apply vg - nsl to multiple languages in the multi30k data set , showing that our model consistently outperforms prior unsupervised approaches ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", 
"argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "visually grounded neural syntax learner", "nugget_type": "APP", "argument_type": "Content", "tokens": ["visually", "grounded", "neural", "syntax", "learner"], "offsets": [3, 4, 5, 6, 7]}, {"text": "learning", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learning"], "offsets": [17]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "without any explicit supervision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "any", "explicit", "supervision"], "offsets": [22, 23, 24, 25]}, {"text": "syntactic representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["syntactic", "representations"], "offsets": [18, 19]}, {"text": "structures", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["structures"], "offsets": [21]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [17]}}, {"event_type": "MDS", "arguments": [{"text": "natural images", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["natural", "images"], "offsets": [33, 34]}], "trigger": {"text": "looking", "tokens": ["looking"], "offsets": [31]}}, {"event_type": "MDS", "arguments": [{"text": "constituency parse trees of texts", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["constituency", "parse", "trees", "of", "texts"], "offsets": [44, 45, 46, 47, 48]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [43]}}, {"event_type": "MDS", "arguments": [{"text": "representations for constituents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["representations", "for", "constituents"], "offsets": [52, 53, 54]}], "trigger": {"text": "recursively composes", "tokens": ["recursively", "composes"], "offsets": [50, 51]}}, {"event_type": "MDS", "arguments": [{"text": "constituency parse trees of texts", "nugget_type": "FEA", 
"argument_type": "BaseComponent", "tokens": ["constituency", "parse", "trees", "of", "texts"], "offsets": [44, 45, 46, 47, 48]}, {"text": "representations for constituents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["representations", "for", "constituents"], "offsets": [52, 53, 54]}, {"text": "images", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["images"], "offsets": [60]}], "trigger": {"text": "matches", "tokens": ["matches"], "offsets": [57]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [62]}, {"text": "matching scores with images", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["matching", "scores", "with", "images"], "offsets": [69, 70, 71, 72]}, {"text": "guide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["guide"], "offsets": [78]}, {"text": "concreteness of constituents", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["concreteness", "of", "constituents"], "offsets": [64, 65, 66]}], "trigger": {"text": "define", "tokens": ["define"], "offsets": [63]}}, {"event_type": "PUR", "arguments": [{"text": "parsing of text", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["parsing", "of", "text"], "offsets": [80, 81, 82]}], "trigger": {"text": "guide", "tokens": ["guide"], "offsets": [78]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [95]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [90]}}, {"event_type": "CMP", "arguments": [{"text": "mscoco data set", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["mscoco", "data", "set"], "offsets": [87, 88, 89]}, {"text": "visually grounded neural syntax learner", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["visually", "grounded", "neural", "syntax", "learner"], "offsets": [3, 4, 5, 6, 7]}, 
{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [95]}, {"text": "unsupervised parsing approaches that do not use visual grounding", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["unsupervised", "parsing", "approaches", "that", "do", "not", "use", "visual", "grounding"], "offsets": [97, 98, 99, 100, 101, 102, 103, 104, 105]}, {"text": "against gold parse trees", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["against", "gold", "parse", "trees"], "offsets": [112, 113, 114, 115]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [95]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [117]}, {"text": "more stable", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "stable"], "offsets": [123, 124]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [118]}}, {"event_type": "CMP", "arguments": [{"text": "visually grounded neural syntax learner", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["visually", "grounded", "neural", "syntax", "learner"], "offsets": [3, 4, 5, 6, 7]}, {"text": "more stable", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "stable"], "offsets": [123, 124]}, {"text": "much", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["much"], "offsets": [122]}, {"text": "with respect to the choice of random initialization", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "respect", "to", "the", "choice", "of", "random", "initialization"], "offsets": [125, 126, 127, 128, 129, 130, 131, 132]}], "trigger": {"text": "more stable", "tokens": ["more", "stable"], "offsets": [123, 124]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [140]}, {"text": "correlates", "nugget_type": "E-FAC", 
"argument_type": "Content", "tokens": ["correlates"], "offsets": [151]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [142]}}, {"event_type": "FAC", "arguments": [{"text": "similar measure defined by linguists", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["similar", "measure", "defined", "by", "linguists"], "offsets": [155, 156, 157, 158, 159]}, {"text": "concreteness acquired by vg - nsl", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["concreteness", "acquired", "by", "visually", "grounded", "neural", "syntax", "learner"], "offsets": [145, 146, 147, 3, 4, 5, 6, 7]}, {"text": "well", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["well"], "offsets": [152]}], "trigger": {"text": "correlates", "tokens": ["correlates"], "offsets": [151]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [163]}, {"text": "multiple languages", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multiple", "languages"], "offsets": [170, 171]}, {"text": "multi30k data set", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multi30k", "data", "set"], "offsets": [174, 175, 176]}, {"text": "visually grounded neural syntax learner", "nugget_type": "APP", "argument_type": "Content", "tokens": ["visually", "grounded", "neural", "syntax", "learner"], "offsets": [3, 4, 5, 6, 7]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [165]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [183]}, {"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [163]}], "trigger": {"text": "showing", "tokens": ["showing"], "offsets": [178]}}, {"event_type": "CMP", "arguments": [{"text": "prior unsupervised approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["prior", "unsupervised", 
"approaches"], "offsets": [184, 185, 186]}, {"text": "visually grounded neural syntax learner", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["visually", "grounded", "neural", "syntax", "learner"], "offsets": [3, 4, 5, 6, 7]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [183]}}, {"event_type": "MDS", "arguments": [{"text": "paired captions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["paired", "captions"], "offsets": [37, 38]}], "trigger": {"text": "reading", "tokens": ["reading"], "offsets": [36]}}], "document": ["we", "present", "the", "visually", "grounded", "neural", "syntax", "learner", "(", "vg", "-", "nsl", ")", ",", "an", "approach", "for", "learning", "syntactic", "representations", "and", "structures", "without", "any", "explicit", "supervision", ".", "the", "model", "learns", "by", "looking", "at", "natural", "images", "and", "reading", "paired", "captions", ".", "vg", "-", "nsl", "generates", "constituency", "parse", "trees", "of", "texts", ",", "recursively", "composes", "representations", "for", "constituents", ",", "and", "matches", "them", "with", "images", ".", "we", "define", "concreteness", "of", "constituents", "by", "their", "matching", "scores", "with", "images", ",", "and", "use", "it", "to", "guide", "the", "parsing", "of", "text", ".", "experiments", "on", "the", "mscoco", "data", "set", "show", "that", "vg", "-", "nsl", "outperforms", "various", "unsupervised", "parsing", "approaches", "that", "do", "not", "use", "visual", "grounding", ",", "in", "terms", "of", "f1", "scores", "against", "gold", "parse", "trees", ".", "we", "find", "that", "vgnsl", "is", "much", "more", "stable", "with", "respect", "to", "the", "choice", "of", "random", "initialization", "and", "the", "amount", "of", "training", "data", ".", "we", "also", "find", "that", "the", "concreteness", "acquired", "by", "vg", "-", "nsl", "correlates", "well", "with", "a", "similar", "measure", "defined", "by", 
"linguists", ".", "finally", ",", "we", "also", "apply", "vg", "-", "nsl", "to", "multiple", "languages", "in", "the", "multi30k", "data", "set", ",", "showing", "that", "our", "model", "consistently", "outperforms", "prior", "unsupervised", "approaches", "."]}, {"venue": "ACL", "title": "A Joint Named-Entity Recognizer for Heterogeneous Tag-sets Using a Tag Hierarchy", "abstract": "We study a variant of domain adaptation for named-entity recognition where multiple, heterogeneously tagged training sets are available. Furthermore, the test tag-set is not identical to any individual training tag-set. Yet, the relations between all tags are provided in a tag hierarchy, covering the test tags as a combination of training tags. This setting occurs when various datasets are created using different annotation schemes. This is also the case of extending a tag-set with a new tag by annotating only the new tag in a new dataset. We propose to use the given tag hierarchy to jointly learn a neural network that shares its tagging layer among all tag-sets. We compare this model to combining independent models and to a model based on the multitasking approach. 
Our experiments show the benefit of the tag-hierarchy model, especially when facing non-trivial consolidation of tag-sets.", "doc_id": "eb93b9b0ae3a08f6dbbd98d526fffc70", "publication_year": 2019, "sentences": ["we study a variant of domain adaptation for named - entity recognition where multiple , heterogeneously tagged training sets are available .", "furthermore , the test tag - set is not identical to any individual training tag - set .", "yet , the relations between all tags are provided in a tag hierarchy , covering the test tags as a combination of training tags .", "this setting occurs when various datasets are created using different annotation schemes .", "this is also the case of extending a tag - set with a new tag by annotating only the new tag in a new dataset .", "we propose to use the given tag hierarchy to jointly learn a neural network that shares its tagging layer among all tag - sets .", "we compare this model to combining independent models and to a model based on the multitasking approach .", "our experiments show the benefit of the tag - hierarchy model , especially when facing non - trivial consolidation of tag - sets ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "variant of domain adaptation", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["variant", "of", "domain", "adaptation"], "offsets": [3, 4, 5, 6]}, {"text": "named - entity recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["named", "-", "entity", "recognition"], "offsets": [8, 9, 10, 11]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [1]}}, {"event_type": "MDS", "arguments": [{"text": "relations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["relations"], "offsets": [43]}, {"text": "between all tags", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "all", "tags"], 
"offsets": [44, 45, 46]}, {"text": "tag hierarchy", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["tag", "hierarchy"], "offsets": [51, 52]}], "trigger": {"text": "provided", "tokens": ["provided"], "offsets": [48]}}, {"event_type": "MDS", "arguments": [{"text": "new tag", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["new", "tag"], "offsets": [97, 98]}, {"text": "dataset", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["dataset"], "offsets": [102]}, {"text": "extending", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["extending"], "offsets": [84]}], "trigger": {"text": "annotating", "tokens": ["annotating"], "offsets": [94]}}, {"event_type": "PUR", "arguments": [{"text": "tag - set", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["tag", "-", "set"], "offsets": [86, 87, 88]}], "trigger": {"text": "extending", "tokens": ["extending"], "offsets": [84]}}, {"event_type": "MDS", "arguments": [{"text": "neural network", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["neural", "network"], "offsets": [116, 117]}, {"text": "that shares its tagging layer among all tag - sets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["that", "shares", "neural", "network", "tagging", "layer", "among", "all", "tag", "-", "sets"], "offsets": [118, 119, 116, 117, 121, 122, 123, 124, 125, 126, 127]}, {"text": "given tag hierarchy", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["given", "tag", "hierarchy"], "offsets": [109, 110, 111]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [114]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [129]}, {"text": "combining independent models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["combining", "independent", "models"], "offsets": [134, 135, 136]}, {"text": "model based on the 
multitasking approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model", "based", "on", "the", "multitasking", "approach"], "offsets": [140, 141, 142, 143, 144, 145]}, {"text": "tag hierarchy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["tag", "hierarchy"], "offsets": [110, 111]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [130]}}, {"event_type": "FAC", "arguments": [{"text": "when facing non - trivial consolidation of tag - sets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "facing", "non", "-", "trivial", "consolidation", "of", "tag", "-", "sets"], "offsets": [160, 161, 162, 163, 164, 165, 166, 167, 168, 169]}, {"text": "benefit of the tag - hierarchy model", "nugget_type": "STR", "argument_type": "Object", "tokens": ["benefit", "of", "the", "tag", "-", "hierarchy", "model"], "offsets": [151, 152, 153, 154, 155, 156, 157]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [149]}}, {"event_type": "MDS", "arguments": [{"text": "combination of training tags", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["combination", "of", "training", "tags"], "offsets": [60, 61, 62, 63]}, {"text": "test tags", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["test", "tags"], "offsets": [56, 57]}], "trigger": {"text": "covering", "tokens": ["covering"], "offsets": [54]}}], "document": ["we", "study", "a", "variant", "of", "domain", "adaptation", "for", "named", "-", "entity", "recognition", "where", "multiple", ",", "heterogeneously", "tagged", "training", "sets", "are", "available", ".", "furthermore", ",", "the", "test", "tag", "-", "set", "is", "not", "identical", "to", "any", "individual", "training", "tag", "-", "set", ".", "yet", ",", "the", "relations", "between", "all", "tags", "are", "provided", "in", "a", "tag", "hierarchy", ",", "covering", "the", "test", "tags", "as", "a", "combination", "of", "training", "tags", ".", "this", "setting", 
"occurs", "when", "various", "datasets", "are", "created", "using", "different", "annotation", "schemes", ".", "this", "is", "also", "the", "case", "of", "extending", "a", "tag", "-", "set", "with", "a", "new", "tag", "by", "annotating", "only", "the", "new", "tag", "in", "a", "new", "dataset", ".", "we", "propose", "to", "use", "the", "given", "tag", "hierarchy", "to", "jointly", "learn", "a", "neural", "network", "that", "shares", "its", "tagging", "layer", "among", "all", "tag", "-", "sets", ".", "we", "compare", "this", "model", "to", "combining", "independent", "models", "and", "to", "a", "model", "based", "on", "the", "multitasking", "approach", ".", "our", "experiments", "show", "the", "benefit", "of", "the", "tag", "-", "hierarchy", "model", ",", "especially", "when", "facing", "non", "-", "trivial", "consolidation", "of", "tag", "-", "sets", "."]}, {"venue": "ACL", "title": "An Information-theoretic Approach to Prompt Engineering Without Ground Truth Labels", "abstract": "Pre-trained language models derive substantial linguistic and factual knowledge from the massive corpora on which they are trained, and prompt engineering seeks to align these models to specific tasks. Unfortunately, existing prompt engineering methods require significant amounts of labeled data, access to model parameters, or both. We introduce a new method for selecting prompt templates without labeled examples and without direct access to the model. Specifically, over a set of candidate templates, we choose the template that maximizes the mutual information between the input and the corresponding model output. Across 8 datasets representing 7 distinct NLP tasks, we show that when a template has high mutual information, it also has high accuracy on the task. 
On the largest model, selecting prompts with our method gets 90% of the way from the average prompt accuracy to the best prompt accuracy and requires no ground truth labels.", "doc_id": "74266a87f84525e8e26b393fc4269424", "publication_year": 2022, "sentences": ["pre - trained language models derive substantial linguistic and factual knowledge from the massive corpora on which they are trained , and prompt engineering seeks to align these models to specific tasks .", "unfortunately , existing prompt engineering methods require significant amounts of labeled data , access to model parameters , or both .", "we introduce a new method for selecting prompt templates without labeled examples and without direct access to the model .", "specifically , over a set of candidate templates , we choose the template that maximizes the mutual information between the input and the corresponding model output .", "across 8 datasets representing 7 distinct nlp tasks , we show that when a template has high mutual information , it also has high accuracy on the task .", "on the largest model , selecting prompts with our method gets 90 % of the way from the average prompt accuracy to the best prompt accuracy and requires no ground truth labels ."], "events": [{"event_type": "ITT", "arguments": [{"text": "prompt engineering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["prompt", "engineering"], "offsets": [22, 23]}], "trigger": {"text": "align", "tokens": ["align"], "offsets": [26]}}, {"event_type": "RWF", "arguments": [{"text": "existing prompt engineering methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "prompt", "engineering", "methods"], "offsets": [35, 36, 37, 38]}, {"text": "significant amounts of labeled data", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["significant", "amounts", "of", "labeled", "data"], "offsets": [40, 41, 42, 43, 44]}], "trigger": {"text": "require", "tokens": ["require"], "offsets": [39]}}, 
{"event_type": "RWF", "arguments": [{"text": "existing prompt engineering methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "prompt", "engineering", "methods"], "offsets": [35, 36, 37, 38]}, {"text": "model parameters", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["model", "parameters"], "offsets": [48, 49]}], "trigger": {"text": "access", "tokens": ["access"], "offsets": [46]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [54]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [58]}, {"text": "selecting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["selecting"], "offsets": [60]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [55]}}, {"event_type": "PUR", "arguments": [{"text": "prompt templates", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["prompt", "templates"], "offsets": [61, 62]}], "trigger": {"text": "selecting", "tokens": ["selecting"], "offsets": [60]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [83]}, {"text": "template", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["template"], "offsets": [86]}], "trigger": {"text": "choose", "tokens": ["choose"], "offsets": [84]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [110]}, {"text": "has", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["has"], "offsets": [123]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [111]}}, {"event_type": "FAC", "arguments": [{"text": "across 8 datasets representing 7 distinct nlp tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "8", "datasets", "representing", "7", "distinct", "nlp", "tasks"], 
"offsets": [101, 102, 103, 104, 105, 106, 107, 108]}, {"text": "when a template has high mutual information", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "a", "template", "has", "high", "mutual", "information"], "offsets": [113, 114, 115, 116, 117, 118, 119]}, {"text": "high accuracy", "nugget_type": "STR", "argument_type": "Object", "tokens": ["high", "accuracy"], "offsets": [124, 125]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [123]}}, {"event_type": "FAC", "arguments": [{"text": "selecting prompts with our method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["selecting", "prompts", "with", "our", "method"], "offsets": [135, 136, 137, 138, 139]}, {"text": "90 % of the way", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["90", "%", "of", "the", "way"], "offsets": [141, 142, 143, 144, 145]}, {"text": "from the average prompt accuracy to the best prompt accuracy", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "average", "prompt", "accuracy", "to", "the", "best", "prompt", "accuracy"], "offsets": [146, 147, 148, 149, 150, 151, 152, 153, 154, 155]}], "trigger": {"text": "gets", "tokens": ["gets"], "offsets": [140]}}, {"event_type": "FAC", "arguments": [{"text": "no ground truth labels", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["no", "ground", "truth", "labels"], "offsets": [158, 159, 160, 161]}, {"text": "selecting prompts with our method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["selecting", "prompts", "with", "our", "method"], "offsets": [135, 136, 137, 138, 139]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [157]}}], "document": ["pre", "-", "trained", "language", "models", "derive", "substantial", "linguistic", "and", "factual", "knowledge", "from", "the", "massive", "corpora", "on", "which", "they", "are", "trained", ",", "and", "prompt", "engineering", "seeks", "to", "align", "these", "models", 
"to", "specific", "tasks", ".", "unfortunately", ",", "existing", "prompt", "engineering", "methods", "require", "significant", "amounts", "of", "labeled", "data", ",", "access", "to", "model", "parameters", ",", "or", "both", ".", "we", "introduce", "a", "new", "method", "for", "selecting", "prompt", "templates", "without", "labeled", "examples", "and", "without", "direct", "access", "to", "the", "model", ".", "specifically", ",", "over", "a", "set", "of", "candidate", "templates", ",", "we", "choose", "the", "template", "that", "maximizes", "the", "mutual", "information", "between", "the", "input", "and", "the", "corresponding", "model", "output", ".", "across", "8", "datasets", "representing", "7", "distinct", "nlp", "tasks", ",", "we", "show", "that", "when", "a", "template", "has", "high", "mutual", "information", ",", "it", "also", "has", "high", "accuracy", "on", "the", "task", ".", "on", "the", "largest", "model", ",", "selecting", "prompts", "with", "our", "method", "gets", "90", "%", "of", "the", "way", "from", "the", "average", "prompt", "accuracy", "to", "the", "best", "prompt", "accuracy", "and", "requires", "no", "ground", "truth", "labels", "."]}, {"venue": "ACL", "title": "Aiming beyond the Obvious: Identifying Non-Obvious Cases in Semantic Similarity Datasets", "abstract": "Existing datasets for scoring text pairs in terms of semantic similarity contain instances whose resolution differs according to the degree of difficulty. This paper proposes to distinguish obvious from non-obvious text pairs based on superficial lexical overlap and ground-truth labels. We characterise existing datasets in terms of containing difficult cases and find that recently proposed models struggle to capture the non-obvious cases of semantic similarity. 
We describe metrics that emphasise cases of similarity which require more complex inference and propose that these are used for evaluating systems for semantic similarity.", "doc_id": "0dc01b9bd195b01640f8dce5e8bf9b5a", "publication_year": 2019, "sentences": ["existing datasets for scoring text pairs in terms of semantic similarity contain instances whose resolution differs according to the degree of difficulty .", "this paper proposes to distinguish obvious from non - obvious text pairs based on superficial lexical overlap and ground - truth labels .", "we characterise existing datasets in terms of containing difficult cases and find that recently proposed models struggle to capture the non - obvious cases of semantic similarity .", "we describe metrics that emphasise cases of similarity which require more complex inference and propose that these are used for evaluating systems for semantic similarity ."], "events": [{"event_type": "MDS", "arguments": [{"text": "distinguish", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["distinguish"], "offsets": [27]}, {"text": "superficial lexical overlap", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["superficial", "lexical", "overlap"], "offsets": [37, 38, 39]}, {"text": "ground - truth labels", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["ground", "-", "truth", "labels"], "offsets": [41, 42, 43, 44]}], "trigger": {"text": "based", "tokens": ["based"], "offsets": [35]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [46]}, {"text": "in terms of containing difficult cases", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "terms", "of", "containing", "difficult", "cases"], "offsets": [50, 51, 52, 53, 54, 55]}, {"text": "existing datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["existing", "datasets"], "offsets": [48, 49]}], "trigger": 
{"text": "characterise", "tokens": ["characterise"], "offsets": [47]}}, {"event_type": "RWF", "arguments": [{"text": "recently proposed models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["recently", "proposed", "models"], "offsets": [59, 60, 61]}, {"text": "struggle", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["struggle"], "offsets": [62]}, {"text": "non - obvious cases of semantic similarity", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["non", "-", "obvious", "cases", "of", "semantic", "similarity"], "offsets": [66, 67, 68, 69, 70, 71, 72]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [64]}}, {"event_type": "PRP", "arguments": [{"text": "metrics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["metrics"], "offsets": [76]}, {"text": "emphasise", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["emphasise"], "offsets": [78]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [74]}], "trigger": {"text": "describe", "tokens": ["describe"], "offsets": [75]}}, {"event_type": "PUR", "arguments": [{"text": "similarity which require more complex inference", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["similarity", "which", "require", "more", "complex", "inference"], "offsets": [81, 82, 83, 84, 85, 86]}], "trigger": {"text": "emphasise", "tokens": ["emphasise"], "offsets": [78]}}, {"event_type": "MDS", "arguments": [{"text": "evaluating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluating"], "offsets": [94]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [92]}}, {"event_type": "PUR", "arguments": [{"text": "from non - obvious text pairs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "non", "-", "obvious", "text", "pairs"], "offsets": [29, 30, 31, 32, 33, 34]}, {"text": "obvious", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["obvious"], "offsets": 
[28]}], "trigger": {"text": "distinguish", "tokens": ["distinguish"], "offsets": [27]}}, {"event_type": "PUR", "arguments": [{"text": "systems for semantic similarity", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["systems", "for", "semantic", "similarity"], "offsets": [95, 96, 97, 98]}], "trigger": {"text": "evaluating", "tokens": ["evaluating"], "offsets": [94]}}], "document": ["existing", "datasets", "for", "scoring", "text", "pairs", "in", "terms", "of", "semantic", "similarity", "contain", "instances", "whose", "resolution", "differs", "according", "to", "the", "degree", "of", "difficulty", ".", "this", "paper", "proposes", "to", "distinguish", "obvious", "from", "non", "-", "obvious", "text", "pairs", "based", "on", "superficial", "lexical", "overlap", "and", "ground", "-", "truth", "labels", ".", "we", "characterise", "existing", "datasets", "in", "terms", "of", "containing", "difficult", "cases", "and", "find", "that", "recently", "proposed", "models", "struggle", "to", "capture", "the", "non", "-", "obvious", "cases", "of", "semantic", "similarity", ".", "we", "describe", "metrics", "that", "emphasise", "cases", "of", "similarity", "which", "require", "more", "complex", "inference", "and", "propose", "that", "these", "are", "used", "for", "evaluating", "systems", "for", "semantic", "similarity", "."]}, {"venue": "ACL", "title": "S2ORC: The Semantic Scholar Open Research Corpus", "abstract": "We introduce S2ORC, a large corpus of 81.1M English-language academic papers spanning many academic disciplines. The corpus consists of rich metadata, paper abstracts, resolved bibliographic references, as well as structured full text for 8.1M open access papers. Full text is annotated with automatically-detected inline mentions of citations, figures, and tables, each linked to their corresponding paper objects. 
In S2ORC, we aggregate papers from hundreds of academic publishers and digital archives into a unified source, and create the largest publicly-available collection of machine-readable academic text to date. We hope this resource will facilitate research and development of tools and tasks for text mining over academic text.", "doc_id": "989c48981698591579ee225c6f1bebc5", "publication_year": 2020, "sentences": ["we introduce s2orc , a large corpus of 81 . 1m english - language academic papers spanning many academic disciplines .", "the corpus consists of rich metadata , paper abstracts , resolved bibliographic references , as well as structured full text for 8 . 1m open access papers .", "full text is annotated with automatically - detected inline mentions of citations , figures , and tables , each linked to their corresponding paper objects .", "in s2orc , we aggregate papers from hundreds of academic publishers and digital archives into a unified source , and create the largest publicly - available collection of machine - readable academic text to date .", "we hope this resource will facilitate research and development of tools and tasks for text mining over academic text ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "s2orc", "nugget_type": "DST", "argument_type": "Content", "tokens": ["s2orc"], "offsets": [2]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [1]}}, {"event_type": "WKS", "arguments": [{"text": "s2orc", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["s2orc"], "offsets": [76]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [78]}, {"text": "papers from hundreds of academic publishers and digital archives", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["papers", "from", "hundreds", "of", "academic", "publishers", "and", "digital", 
"archives"], "offsets": [80, 81, 82, 83, 84, 85, 86, 87, 88]}, {"text": "unified source", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["unified", "source"], "offsets": [91, 92]}], "trigger": {"text": "aggregate", "tokens": ["aggregate"], "offsets": [79]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [78]}, {"text": "largest publicly - available collection of machine - readable academic text to date", "nugget_type": "DST", "argument_type": "Content", "tokens": ["largest", "publicly", "-", "available", "collection", "of", "machine", "-", "readable", "academic", "text", "to", "date"], "offsets": [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109]}], "trigger": {"text": "create", "tokens": ["create"], "offsets": [95]}}], "document": ["we", "introduce", "s2orc", ",", "a", "large", "corpus", "of", "81", ".", "1m", "english", "-", "language", "academic", "papers", "spanning", "many", "academic", "disciplines", ".", "the", "corpus", "consists", "of", "rich", "metadata", ",", "paper", "abstracts", ",", "resolved", "bibliographic", "references", ",", "as", "well", "as", "structured", "full", "text", "for", "8", ".", "1m", "open", "access", "papers", ".", "full", "text", "is", "annotated", "with", "automatically", "-", "detected", "inline", "mentions", "of", "citations", ",", "figures", ",", "and", "tables", ",", "each", "linked", "to", "their", "corresponding", "paper", "objects", ".", "in", "s2orc", ",", "we", "aggregate", "papers", "from", "hundreds", "of", "academic", "publishers", "and", "digital", "archives", "into", "a", "unified", "source", ",", "and", "create", "the", "largest", "publicly", "-", "available", "collection", "of", "machine", "-", "readable", "academic", "text", "to", "date", ".", "we", "hope", "this", "resource", "will", "facilitate", "research", "and", "development", "of", "tools", "and", "tasks", "for", "text", "mining", "over", "academic", 
"text", "."]}, {"venue": "ACL", "title": "Grounded Conversation Generation as Guided Traverses in Commonsense Knowledge Graphs", "abstract": "Human conversations naturally evolve around related concepts and hop to distant concepts. This paper presents a new conversation generation model, ConceptFlow, which leverages commonsense knowledge graphs to explicitly model conversation flows. By grounding conversations to the concept space, ConceptFlow represents the potential conversation flow as traverses in the concept space along commonsense relations. The traverse is guided by graph attentions in the concept graph, moving towards more meaningful directions in the concept space, in order to generate more semantic and informative responses. Experiments on Reddit conversations demonstrate ConceptFlow\u2019s effectiveness over previous knowledge-aware conversation models and GPT-2 based models while using 70% fewer parameters, confirming the advantage of explicit modeling conversation structures. 
All source codes of this work are available at https://github.com/thunlp/ConceptFlow.", "doc_id": "875077ed2b8e3b48da139f6918922057", "publication_year": 2020, "sentences": ["human conversations naturally evolve around related concepts and hop to distant concepts .", "this paper presents a new conversation generation model , conceptflow , which leverages commonsense knowledge graphs to explicitly model conversation flows .", "by grounding conversations to the concept space , conceptflow represents the potential conversation flow as traverses in the concept space along commonsense relations .", "the traverse is guided by graph attentions in the concept graph , moving towards more meaningful directions in the concept space , in order to generate more semantic and informative responses .", "experiments on reddit conversations demonstrate conceptflow \u2019 s effectiveness over previous knowledge - aware conversation models and gpt - 2 based models while using 70 % fewer parameters , confirming the advantage of explicit modeling conversation structures .", "all source codes of this work are available at https : / / github . 
com / thunlp / conceptflow ."], "events": [{"event_type": "ITT", "arguments": [{"text": "human conversations", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["human", "conversations"], "offsets": [0, 1]}], "trigger": {"text": "evolve", "tokens": ["evolve"], "offsets": [3]}}, {"event_type": "PRP", "arguments": [{"text": "conversation generation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["conversation", "generation", "model"], "offsets": [18, 19, 20]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [15]}}, {"event_type": "FIN", "arguments": [{"text": "confirming", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["confirming"], "offsets": [120]}, {"text": "effectiveness", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["effectiveness"], "offsets": [99]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [95]}}, {"event_type": "CMP", "arguments": [{"text": "conceptflow", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["conceptflow"], "offsets": [96]}, {"text": "previous knowledge - aware conversation models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "knowledge", "-", "aware", "conversation", "models"], "offsets": [101, 102, 103, 104, 105, 106]}, {"text": "gpt - 2 based models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["gpt", "-", "2", "based", "models"], "offsets": [108, 109, 110, 111, 112]}, {"text": "while using 70 % fewer parameters", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "using", "70", "%", "fewer", "parameters"], "offsets": [113, 114, 115, 116, 117, 118]}], "trigger": {"text": "effectiveness", "tokens": ["effectiveness"], "offsets": [99]}}, {"event_type": "FAC", "arguments": [{"text": "advantage of explicit modeling conversation structures", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["advantage", "of", "explicit", "modeling", "conversation", 
"structures"], "offsets": [122, 123, 124, 125, 126, 127]}], "trigger": {"text": "confirming", "tokens": ["confirming"], "offsets": [120]}}, {"event_type": "MDS", "arguments": [{"text": "commonsense knowledge graphs", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["commonsense", "knowledge", "graphs"], "offsets": [26, 27, 28]}, {"text": "conversation flows", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["conversation", "flows"], "offsets": [32, 33]}], "trigger": {"text": "explicitly model", "tokens": ["explicitly", "model"], "offsets": [30, 31]}}, {"event_type": "MDS", "arguments": [{"text": "conversations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["conversations"], "offsets": [37]}, {"text": "concept space", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["concept", "space"], "offsets": [40, 41]}], "trigger": {"text": "grounding", "tokens": ["grounding"], "offsets": [36]}}, {"event_type": "MDS", "arguments": [{"text": "potential conversation flow", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["potential", "conversation", "flow"], "offsets": [46, 47, 48]}, {"text": "concept space", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["concept", "space"], "offsets": [53, 54]}, {"text": "along commonsense relations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["along", "commonsense", "relations"], "offsets": [55, 56, 57]}], "trigger": {"text": "traverses", "tokens": ["traverses"], "offsets": [50]}}, {"event_type": "MDS", "arguments": [{"text": "traverse", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["traverse"], "offsets": [60]}, {"text": "graph attentions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["graph", "attentions"], "offsets": [64, 65]}, {"text": "moving towards", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["moving", "towards"], "offsets": [71, 72]}, 
{"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [84]}], "trigger": {"text": "guided", "tokens": ["guided"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "more meaningful directions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["more", "meaningful", "directions"], "offsets": [73, 74, 75]}, {"text": "in the concept space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "concept", "space"], "offsets": [76, 77, 78, 79]}], "trigger": {"text": "moving towards", "tokens": ["moving", "towards"], "offsets": [71, 72]}}, {"event_type": "PUR", "arguments": [{"text": "more semantic responses", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["more", "semantic", "responses"], "offsets": [85, 86, 89]}, {"text": "informative responses", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["informative", "responses"], "offsets": [88, 89]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [84]}}], "document": ["human", "conversations", "naturally", "evolve", "around", "related", "concepts", "and", "hop", "to", "distant", "concepts", ".", "this", "paper", "presents", "a", "new", "conversation", "generation", "model", ",", "conceptflow", ",", "which", "leverages", "commonsense", "knowledge", "graphs", "to", "explicitly", "model", "conversation", "flows", ".", "by", "grounding", "conversations", "to", "the", "concept", "space", ",", "conceptflow", "represents", "the", "potential", "conversation", "flow", "as", "traverses", "in", "the", "concept", "space", "along", "commonsense", "relations", ".", "the", "traverse", "is", "guided", "by", "graph", "attentions", "in", "the", "concept", "graph", ",", "moving", "towards", "more", "meaningful", "directions", "in", "the", "concept", "space", ",", "in", "order", "to", "generate", "more", "semantic", "and", "informative", "responses", ".", "experiments", "on", "reddit", "conversations", 
"demonstrate", "conceptflow", "\u2019", "s", "effectiveness", "over", "previous", "knowledge", "-", "aware", "conversation", "models", "and", "gpt", "-", "2", "based", "models", "while", "using", "70", "%", "fewer", "parameters", ",", "confirming", "the", "advantage", "of", "explicit", "modeling", "conversation", "structures", ".", "all", "source", "codes", "of", "this", "work", "are", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "thunlp", "/", "conceptflow", "."]}, {"venue": "ACL", "title": "Hypernymy Detection for Low-Resource Languages via Meta Learning", "abstract": "Hypernymy detection, a.k.a, lexical entailment, is a fundamental sub-task of many natural language understanding tasks. Previous explorations mostly focus on monolingual hypernymy detection on high-resource languages, e.g., English, but few investigate the low-resource scenarios. This paper addresses the problem of low-resource hypernymy detection by combining high-resource languages. We extensively compare three joint training paradigms and for the first time propose applying meta learning to relieve the low-resource issue. Experiments demonstrate the superiority of our method among the three settings, which substantially improves the performance of extremely low-resource languages by preventing over-fitting on small datasets.", "doc_id": "339f2388717941e6a6c4fd5e9091bda7", "publication_year": 2020, "sentences": ["hypernymy detection , a . k . a , lexical entailment , is a fundamental sub - task of many natural language understanding tasks .", "previous explorations mostly focus on monolingual hypernymy detection on high - resource languages , e . g . 
, english , but few investigate the low - resource scenarios .", "this paper addresses the problem of low - resource hypernymy detection by combining high - resource languages .", "we extensively compare three joint training paradigms and for the first time propose applying meta learning to relieve the low - resource issue .", "experiments demonstrate the superiority of our method among the three settings , which substantially improves the performance of extremely low - resource languages by preventing over - fitting on small datasets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "hypernymy detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["hypernymy", "detection"], "offsets": [0, 1]}], "trigger": {"text": "fundamental", "tokens": ["fundamental"], "offsets": [14]}}, {"event_type": "MDS", "arguments": [{"text": "high - resource languages", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["high", "-", "resource", "languages"], "offsets": [68, 69, 70, 71]}], "trigger": {"text": "combining", "tokens": ["combining"], "offsets": [67]}}, {"event_type": "WKS", "arguments": [{"text": "problem of low - resource hypernymy detection", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem", "of", "low", "-", "resource", "hypernymy", "detection"], "offsets": [59, 60, 61, 62, 63, 64, 65]}], "trigger": {"text": "addresses", "tokens": ["addresses"], "offsets": [57]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [73]}, {"text": "three joint training paradigms", "nugget_type": "APP", "argument_type": "Content", "tokens": ["three", "joint", "training", "paradigms"], "offsets": [76, 77, 78, 79]}], "trigger": {"text": "extensively compare", "tokens": ["extensively", "compare"], "offsets": [74, 75]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": 
[73]}, {"text": "applying meta learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["applying", "meta", "learning"], "offsets": [86, 87, 88]}, {"text": "relieve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["relieve"], "offsets": [90]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [85]}}, {"event_type": "PUR", "arguments": [{"text": "low - resource issue", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["low", "-", "resource", "issue"], "offsets": [92, 93, 94, 95]}], "trigger": {"text": "relieve", "tokens": ["relieve"], "offsets": [90]}}, {"event_type": "FIN", "arguments": [{"text": "superiority", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["superiority"], "offsets": [100]}, {"text": "improves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improves"], "offsets": [111]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [98]}}, {"event_type": "CMP", "arguments": [{"text": "superiority", "nugget_type": "STR", "argument_type": "Result", "tokens": ["superiority"], "offsets": [100]}, {"text": "applying meta learning", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["applying", "meta", "learning"], "offsets": [86, 87, 88]}], "trigger": {"text": "superiority", "tokens": ["superiority"], "offsets": [100]}}, {"event_type": "FAC", "arguments": [{"text": "applying meta learning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["applying", "meta", "learning"], "offsets": [86, 87, 88]}, {"text": "substantially", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["substantially"], "offsets": [110]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance"], "offsets": [113]}, {"text": "extremely low - resource languages", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["extremely", "low", "-", "resource", "languages"], "offsets": [115, 116, 117, 118, 119]}, {"text": 
"on small datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["on", "small", "datasets"], "offsets": [125, 126, 127]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [111]}}], "document": ["hypernymy", "detection", ",", "a", ".", "k", ".", "a", ",", "lexical", "entailment", ",", "is", "a", "fundamental", "sub", "-", "task", "of", "many", "natural", "language", "understanding", "tasks", ".", "previous", "explorations", "mostly", "focus", "on", "monolingual", "hypernymy", "detection", "on", "high", "-", "resource", "languages", ",", "e", ".", "g", ".", ",", "english", ",", "but", "few", "investigate", "the", "low", "-", "resource", "scenarios", ".", "this", "paper", "addresses", "the", "problem", "of", "low", "-", "resource", "hypernymy", "detection", "by", "combining", "high", "-", "resource", "languages", ".", "we", "extensively", "compare", "three", "joint", "training", "paradigms", "and", "for", "the", "first", "time", "propose", "applying", "meta", "learning", "to", "relieve", "the", "low", "-", "resource", "issue", ".", "experiments", "demonstrate", "the", "superiority", "of", "our", "method", "among", "the", "three", "settings", ",", "which", "substantially", "improves", "the", "performance", "of", "extremely", "low", "-", "resource", "languages", "by", "preventing", "over", "-", "fitting", "on", "small", "datasets", "."]}, {"venue": "ACL", "title": "Social Biases in NLP Models as Barriers for Persons with Disabilities", "abstract": "Building equitable and inclusive NLP technologies demands consideration of whether and how social attitudes are represented in ML models. In particular, representations encoded in models often inadvertently perpetuate undesirable social biases from the data on which they are trained. In this paper, we present evidence of such undesirable biases towards mentions of disability in two different English language models: toxicity prediction and sentiment analysis. 
Next, we demonstrate that the neural embeddings that are the critical first step in most NLP pipelines similarly contain undesirable biases towards mentions of disability. We end by highlighting topical biases in the discourse about disability which may contribute to the observed model biases; for instance, gun violence, homelessness, and drug addiction are over-represented in texts discussing mental illness.", "doc_id": "f497945ee4031c5fd7c6c6c4abfbddbb", "publication_year": 2020, "sentences": ["building equitable and inclusive nlp technologies demands consideration of whether and how social attitudes are represented in ml models .", "in particular , representations encoded in models often inadvertently perpetuate undesirable social biases from the data on which they are trained .", "in this paper , we present evidence of such undesirable biases towards mentions of disability in two different english language models : toxicity prediction and sentiment analysis .", "next , we demonstrate that the neural embeddings that are the critical first step in most nlp pipelines similarly contain undesirable biases towards mentions of disability .", "we end by highlighting topical biases in the discourse about disability which may contribute to the observed model biases ; for instance , gun violence , homelessness , and drug addiction are over - represented in texts discussing mental illness ."], "events": [{"event_type": "ITT", "arguments": [{"text": "nlp technologies", "nugget_type": "APP", "argument_type": "Target", "tokens": ["nlp", "technologies"], "offsets": [4, 5]}], "trigger": {"text": "building", "tokens": ["building"], "offsets": [0]}}, {"event_type": "RWF", "arguments": [{"text": "representations", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["representations"], "offsets": [23]}, {"text": "undesirable social biases", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["undesirable", "social", "biases"], "offsets": [30, 31, 32]}], "trigger": 
{"text": "inadvertently perpetuate", "tokens": ["inadvertently", "perpetuate"], "offsets": [28, 29]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [46]}, {"text": "undesirable biases", "nugget_type": "WEA", "argument_type": "Content", "tokens": ["undesirable", "biases"], "offsets": [51, 52]}, {"text": "in two different english language models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "two", "different", "english", "language", "models"], "offsets": [57, 58, 59, 60, 61, 62]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [47]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [97]}, {"text": "topical biases in the discourse about disability", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["topical", "biases", "in", "the", "discourse", "about", "disability"], "offsets": [101, 102, 103, 104, 105, 106, 107]}], "trigger": {"text": "highlighting", "tokens": ["highlighting"], "offsets": [100]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [72]}, {"text": "similarly contain", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["similarly", "contain"], "offsets": [88, 89]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [73]}}, {"event_type": "FAC", "arguments": [{"text": "neural embeddings", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["neural", "embeddings"], "offsets": [76, 77]}, {"text": "undesirable biases", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["undesirable", "biases"], "offsets": [90, 91]}], "trigger": {"text": "similarly contain", "tokens": ["similarly", "contain"], "offsets": [88, 89]}}], "document": ["building", "equitable", "and", "inclusive", "nlp", "technologies", "demands", 
"consideration", "of", "whether", "and", "how", "social", "attitudes", "are", "represented", "in", "ml", "models", ".", "in", "particular", ",", "representations", "encoded", "in", "models", "often", "inadvertently", "perpetuate", "undesirable", "social", "biases", "from", "the", "data", "on", "which", "they", "are", "trained", ".", "in", "this", "paper", ",", "we", "present", "evidence", "of", "such", "undesirable", "biases", "towards", "mentions", "of", "disability", "in", "two", "different", "english", "language", "models", ":", "toxicity", "prediction", "and", "sentiment", "analysis", ".", "next", ",", "we", "demonstrate", "that", "the", "neural", "embeddings", "that", "are", "the", "critical", "first", "step", "in", "most", "nlp", "pipelines", "similarly", "contain", "undesirable", "biases", "towards", "mentions", "of", "disability", ".", "we", "end", "by", "highlighting", "topical", "biases", "in", "the", "discourse", "about", "disability", "which", "may", "contribute", "to", "the", "observed", "model", "biases", ";", "for", "instance", ",", "gun", "violence", ",", "homelessness", ",", "and", "drug", "addiction", "are", "over", "-", "represented", "in", "texts", "discussing", "mental", "illness", "."]}, {"venue": "ACL", "title": "Argument Generation with Retrieval, Planning, and Realization", "abstract": "Automatic argument generation is an appealing but challenging task. In this paper, we study the specific problem of counter-argument generation, and present a novel framework, CANDELA. It consists of a powerful retrieval system and a novel two-step generation model, where a text planning decoder first decides on the main talking points and a proper language style for each sentence, then a content realization decoder reflects the decisions and constructs an informative paragraph-level argument. 
Furthermore, our generation model is empowered by a retrieval system indexed with 12 million articles collected from Wikipedia and popular English news media, which provides access to high-quality content with diversity. Automatic evaluation on a large-scale dataset collected from Reddit shows that our model yields significantly higher BLEU, ROUGE, and METEOR scores than the state-of-the-art and non-trivial comparisons. Human evaluation further indicates that our system arguments are more appropriate for refutation and richer in content.", "doc_id": "3aa11d80fa97b4a6f7b435bb3de7007a", "publication_year": 2019, "sentences": ["automatic argument generation is an appealing but challenging task .", "in this paper , we study the specific problem of counter - argument generation , and present a novel framework , candela .", "it consists of a powerful retrieval system and a novel two - step generation model , where a text planning decoder first decides on the main talking points and a proper language style for each sentence , then a content realization decoder reflects the decisions and constructs an informative paragraph - level argument .", "furthermore , our generation model is empowered by a retrieval system indexed with 12 million articles collected from wikipedia and popular english news media , which provides access to high - quality content with diversity .", "automatic evaluation on a large - scale dataset collected from reddit shows that our model yields significantly higher bleu , rouge , and meteor scores than the state - of - the - art and non - trivial comparisons .", "human evaluation further indicates that our system arguments are more appropriate for refutation and richer in content ."], "events": [{"event_type": "ITT", "arguments": [{"text": "automatic argument generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["automatic", "argument", "generation"], "offsets": [0, 1, 2]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": 
[8]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [14]}, {"text": "counter - argument generation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["counter", "-", "argument", "generation"], "offsets": [20, 21, 22, 23]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [15]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [14]}, {"text": "candela", "nugget_type": "APP", "argument_type": "Content", "tokens": ["candela"], "offsets": [31]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [26]}}, {"event_type": "MDS", "arguments": [{"text": "talking points", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["talking", "points"], "offsets": [59, 60]}, {"text": "proper language style for each sentence", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["proper", "language", "style", "for", "each", "sentence"], "offsets": [63, 64, 65, 66, 67, 68]}], "trigger": {"text": "decides", "tokens": ["decides"], "offsets": [55]}}, {"event_type": "MDS", "arguments": [{"text": "informative paragraph - level argument", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["informative", "paragraph", "-", "level", "argument"], "offsets": [81, 82, 83, 84, 85]}], "trigger": {"text": "constructs", "tokens": ["constructs"], "offsets": [79]}}, {"event_type": "FIN", "arguments": [{"text": "yields", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["yields"], "offsets": [138]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [134]}}, {"event_type": "CMP", "arguments": [{"text": "large - scale dataset collected from reddit", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["large", "-", "scale", "dataset", "collected", "from", "reddit"], "offsets": [127, 128, 129, 130, 131, 132, 133]}, {"text": 
"higher", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher"], "offsets": [140]}, {"text": "candela", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["candela"], "offsets": [31]}, {"text": "state - of - the - art and non - trivial comparisons", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "and", "non", "-", "trivial", "comparisons"], "offsets": [150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161]}, {"text": "bleu scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bleu", "scores"], "offsets": [141, 147]}, {"text": "rouge scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["rouge", "scores"], "offsets": [143, 147]}, {"text": "meteor scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["meteor", "scores"], "offsets": [146, 147]}], "trigger": {"text": "yields", "tokens": ["yields"], "offsets": [138]}}, {"event_type": "FIN", "arguments": [{"text": "more appropriate for refutation", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "appropriate", "for", "refutation"], "offsets": [172, 173, 174, 175]}], "trigger": {"text": "indicates", "tokens": ["indicates"], "offsets": [166]}}, {"event_type": "CMP", "arguments": [{"text": "system arguments", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["system", "arguments"], "offsets": [169, 170]}, {"text": "richer in content", "nugget_type": "STR", "argument_type": "Result", "tokens": ["richer", "in", "content"], "offsets": [177, 178, 179]}], "trigger": {"text": "more appropriate for refutation", "tokens": ["more", "appropriate", "for", "refutation"], "offsets": [172, 173, 174, 175]}}], "document": ["automatic", "argument", "generation", "is", "an", "appealing", "but", "challenging", "task", ".", "in", "this", "paper", ",", "we", "study", "the", "specific", "problem", "of", "counter", "-", "argument", "generation", ",", "and", "present", "a", "novel", 
"framework", ",", "candela", ".", "it", "consists", "of", "a", "powerful", "retrieval", "system", "and", "a", "novel", "two", "-", "step", "generation", "model", ",", "where", "a", "text", "planning", "decoder", "first", "decides", "on", "the", "main", "talking", "points", "and", "a", "proper", "language", "style", "for", "each", "sentence", ",", "then", "a", "content", "realization", "decoder", "reflects", "the", "decisions", "and", "constructs", "an", "informative", "paragraph", "-", "level", "argument", ".", "furthermore", ",", "our", "generation", "model", "is", "empowered", "by", "a", "retrieval", "system", "indexed", "with", "12", "million", "articles", "collected", "from", "wikipedia", "and", "popular", "english", "news", "media", ",", "which", "provides", "access", "to", "high", "-", "quality", "content", "with", "diversity", ".", "automatic", "evaluation", "on", "a", "large", "-", "scale", "dataset", "collected", "from", "reddit", "shows", "that", "our", "model", "yields", "significantly", "higher", "bleu", ",", "rouge", ",", "and", "meteor", "scores", "than", "the", "state", "-", "of", "-", "the", "-", "art", "and", "non", "-", "trivial", "comparisons", ".", "human", "evaluation", "further", "indicates", "that", "our", "system", "arguments", "are", "more", "appropriate", "for", "refutation", "and", "richer", "in", "content", "."]}, {"venue": "ACL", "title": "Semantic Graphs for Generating Deep Questions", "abstract": "This paper proposes the problem of Deep Question Generation (DQG), which aims to generate complex questions that require reasoning over multiple pieces of information about the input passage. In order to capture the global structure of the document and facilitate reasoning, we propose a novel framework that first constructs a semantic-level graph for the input document and then encodes the semantic graph by introducing an attention-based GGNN (Att-GGNN). 
Afterward, we fuse the document-level and graph-level representations to perform joint training of content selection and question decoding. On the HotpotQA deep-question centric dataset, our model greatly improves performance over questions requiring reasoning over multiple facts, leading to state-of-the-art performance. The code is publicly available at https://github.com/WING-NUS/SG-Deep-Question-Generation.", "doc_id": "3f4f20abdd45ed0a3c61d7544a2cc3fd", "publication_year": 2020, "sentences": ["this paper proposes the problem of deep question generation ( dqg ) , which aims to generate complex questions that require reasoning over multiple pieces of information about the input passage .", "in order to capture the global structure of the document and facilitate reasoning , we propose a novel framework that first constructs a semantic - level graph for the input document and then encodes the semantic graph by introducing an attention - based ggnn ( att - ggnn ) .", "afterward , we fuse the document - level and graph - level representations to perform joint training of content selection and question decoding .", "on the hotpotqa deep - question centric dataset , our model greatly improves performance over questions requiring reasoning over multiple facts , leading to state - of - the - art performance .", "the code is publicly available at https : / / github . 
com / wing - nus / sg - deep - question - generation ."], "events": [{"event_type": "ITT", "arguments": [{"text": "deep question generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["deep", "question", "generation"], "offsets": [6, 7, 8]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [2]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [46]}, {"text": "novel framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["novel", "framework"], "offsets": [49, 50]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [35]}, {"text": "facilitate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["facilitate"], "offsets": [43]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [47]}}, {"event_type": "PUR", "arguments": [{"text": "global structure of the document", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["global", "structure", "of", "the", "document"], "offsets": [37, 38, 39, 40, 41]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [35]}}, {"event_type": "PUR", "arguments": [{"text": "reasoning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["reasoning"], "offsets": [44]}], "trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [43]}}, {"event_type": "MDS", "arguments": [{"text": "semantic - level graph", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["semantic", "-", "level", "graph"], "offsets": [55, 56, 57, 58]}, {"text": "input document", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["input", "document"], "offsets": [61, 62]}], "trigger": {"text": "constructs", "tokens": ["constructs"], "offsets": [53]}}, {"event_type": "MDS", "arguments": [{"text": "attention - based ggnn", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["attention", 
"-", "based", "ggnn"], "offsets": [72, 73, 74, 75]}, {"text": "encodes", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encodes"], "offsets": [65]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [70]}}, {"event_type": "PUR", "arguments": [{"text": "semantic graph", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["semantic", "graph"], "offsets": [67, 68]}], "trigger": {"text": "encodes", "tokens": ["encodes"], "offsets": [65]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [84]}, {"text": "perform", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["perform"], "offsets": [96]}, {"text": "document - level representations", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["document", "-", "level", "representations"], "offsets": [87, 88, 89, 94]}, {"text": "graph - level representations", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["graph", "-", "level", "representations"], "offsets": [91, 92, 93, 94]}], "trigger": {"text": "fuse", "tokens": ["fuse"], "offsets": [85]}}, {"event_type": "PUR", "arguments": [{"text": "joint training of content selection and question decoding", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["joint", "training", "of", "content", "selection", "and", "question", "decoding"], "offsets": [97, 98, 99, 100, 101, 102, 103, 104]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [96]}}, {"event_type": "FAC", "arguments": [{"text": "hotpotqa deep - question centric dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["hotpotqa", "deep", "-", "question", "centric", "dataset"], "offsets": [108, 109, 110, 111, 112, 113]}, {"text": "framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["framework"], "offsets": [50]}, {"text": "performance over questions requiring reasoning over multiple facts", "nugget_type": 
"TAK", "argument_type": "Object", "tokens": ["performance", "over", "questions", "requiring", "reasoning", "over", "multiple", "facts"], "offsets": [119, 120, 121, 122, 123, 124, 125, 126]}], "trigger": {"text": "greatly improves", "tokens": ["greatly", "improves"], "offsets": [117, 118]}}], "document": ["this", "paper", "proposes", "the", "problem", "of", "deep", "question", "generation", "(", "dqg", ")", ",", "which", "aims", "to", "generate", "complex", "questions", "that", "require", "reasoning", "over", "multiple", "pieces", "of", "information", "about", "the", "input", "passage", ".", "in", "order", "to", "capture", "the", "global", "structure", "of", "the", "document", "and", "facilitate", "reasoning", ",", "we", "propose", "a", "novel", "framework", "that", "first", "constructs", "a", "semantic", "-", "level", "graph", "for", "the", "input", "document", "and", "then", "encodes", "the", "semantic", "graph", "by", "introducing", "an", "attention", "-", "based", "ggnn", "(", "att", "-", "ggnn", ")", ".", "afterward", ",", "we", "fuse", "the", "document", "-", "level", "and", "graph", "-", "level", "representations", "to", "perform", "joint", "training", "of", "content", "selection", "and", "question", "decoding", ".", "on", "the", "hotpotqa", "deep", "-", "question", "centric", "dataset", ",", "our", "model", "greatly", "improves", "performance", "over", "questions", "requiring", "reasoning", "over", "multiple", "facts", ",", "leading", "to", "state", "-", "of", "-", "the", "-", "art", "performance", ".", "the", "code", "is", "publicly", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "wing", "-", "nus", "/", "sg", "-", "deep", "-", "question", "-", "generation", "."]}, {"venue": "ACL", "title": "Optimizing the Factual Correctness of a Summary: A Study of Summarizing Radiology Reports", "abstract": "Neural abstractive summarization models are able to generate summaries which have high overlap with human references. 
However, existing models are not optimized for factual correctness, a critical metric in real-world applications. In this work, we develop a general framework where we evaluate the factual correctness of a generated summary by fact-checking it automatically against its reference using an information extraction module. We further propose a training strategy which optimizes a neural summarization model with a factual correctness reward via reinforcement learning. We apply the proposed method to the summarization of radiology reports, where factual correctness is a key requirement. On two separate datasets collected from hospitals, we show via both automatic and human evaluation that the proposed approach substantially improves the factual correctness and overall quality of outputs over a competitive neural summarization system, producing radiology summaries that approach the quality of human-authored ones.", "doc_id": "3754b2641450768e9575f6be22a2a8d7", "publication_year": 2020, "sentences": ["neural abstractive summarization models are able to generate summaries which have high overlap with human references .", "however , existing models are not optimized for factual correctness , a critical metric in real - world applications .", "in this work , we develop a general framework where we evaluate the factual correctness of a generated summary by fact - checking it automatically against its reference using an information extraction module .", "we further propose a training strategy which optimizes a neural summarization model with a factual correctness reward via reinforcement learning .", "we apply the proposed method to the summarization of radiology reports , where factual correctness is a key requirement .", "on two separate datasets collected from hospitals , we show via both automatic and human evaluation that the proposed approach substantially improves the factual correctness and overall quality of outputs over a competitive neural summarization system , 
producing radiology summaries that approach the quality of human - authored ones ."], "events": [{"event_type": "ITT", "arguments": [{"text": "neural abstractive summarization models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["neural", "abstractive", "summarization", "models"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "existing models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "models"], "offsets": [19, 20]}, {"text": "not optimized", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "optimized"], "offsets": [22, 23]}], "trigger": {"text": "not optimized", "tokens": ["not", "optimized"], "offsets": [22, 23]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [41]}, {"text": "general framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["general", "framework"], "offsets": [44, 45]}, {"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluate"], "offsets": [48]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [42]}}, {"event_type": "PUR", "arguments": [{"text": "factual correctness of a generated summary", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["factual", "correctness", "of", "a", "generated", "summary"], "offsets": [50, 51, 52, 53, 54, 55]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [48]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [41]}, {"text": "information extraction module", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["information", "extraction", "module"], "offsets": [67, 68, 69]}, {"text": "fact - checking", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["fact", "-", 
"checking"], "offsets": [57, 58, 59]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [65]}}, {"event_type": "PUR", "arguments": [{"text": "factual correctness of a generated summary", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["factual", "correctness", "of", "a", "generated", "summary"], "offsets": [50, 51, 52, 53, 54, 55]}, {"text": "against its reference", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["against", "its", "reference"], "offsets": [62, 63, 64]}], "trigger": {"text": "fact - checking", "tokens": ["fact", "-", "checking"], "offsets": [57, 58, 59]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [71]}, {"text": "training strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["training", "strategy"], "offsets": [75, 76]}, {"text": "optimizes", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["optimizes"], "offsets": [78]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [73]}}, {"event_type": "PUR", "arguments": [{"text": "via reinforcement learning", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "reinforcement", "learning"], "offsets": [88, 89, 90]}, {"text": "neural summarization model with a factual correctness reward", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["neural", "summarization", "model", "with", "a", "factual", "correctness", "reward"], "offsets": [80, 81, 82, 83, 84, 85, 86, 87]}], "trigger": {"text": "optimizes", "tokens": ["optimizes"], "offsets": [78]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [92]}, {"text": "training strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["training", "strategy"], "offsets": [75, 76]}, {"text": "summarization of radiology reports", "nugget_type": "TAK", "argument_type": "Target", 
"tokens": ["summarization", "of", "radiology", "reports"], "offsets": [99, 100, 101, 102]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [93]}}, {"event_type": "FIN", "arguments": [{"text": "improves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improves"], "offsets": [133]}, {"text": "approach", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["approach"], "offsets": [153]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [121]}}, {"event_type": "CMP", "arguments": [{"text": "two separate datasets collected from hospitals", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "separate", "datasets", "collected", "from", "hospitals"], "offsets": [113, 114, 115, 116, 117, 118]}, {"text": "via both automatic and human evaluation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "both", "automatic", "and", "human", "evaluation"], "offsets": [122, 123, 124, 125, 126, 127]}, {"text": "training strategy", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["training", "strategy"], "offsets": [75, 76]}, {"text": "substantially", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["substantially"], "offsets": [132]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [133]}, {"text": "factual correctness", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["factual", "correctness"], "offsets": [135, 136]}, {"text": "overall quality of outputs", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["overall", "quality", "of", "outputs"], "offsets": [138, 139, 140, 141]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [133]}}, {"event_type": "CMP", "arguments": [{"text": "two separate datasets collected from hospitals", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "separate", "datasets", "collected", "from", "hospitals"], "offsets": [113, 114, 115, 
116, 117, 118]}, {"text": "via both automatic and human evaluation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "both", "automatic", "and", "human", "evaluation"], "offsets": [122, 123, 124, 125, 126, 127]}, {"text": "radiology summaries", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["radiology", "summaries"], "offsets": [150, 151]}, {"text": "human - authored ones", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["human", "-", "authored", "radiology", "summaries"], "offsets": [157, 158, 159, 150, 151]}], "trigger": {"text": "approach", "tokens": ["approach"], "offsets": [153]}}], "document": ["neural", "abstractive", "summarization", "models", "are", "able", "to", "generate", "summaries", "which", "have", "high", "overlap", "with", "human", "references", ".", "however", ",", "existing", "models", "are", "not", "optimized", "for", "factual", "correctness", ",", "a", "critical", "metric", "in", "real", "-", "world", "applications", ".", "in", "this", "work", ",", "we", "develop", "a", "general", "framework", "where", "we", "evaluate", "the", "factual", "correctness", "of", "a", "generated", "summary", "by", "fact", "-", "checking", "it", "automatically", "against", "its", "reference", "using", "an", "information", "extraction", "module", ".", "we", "further", "propose", "a", "training", "strategy", "which", "optimizes", "a", "neural", "summarization", "model", "with", "a", "factual", "correctness", "reward", "via", "reinforcement", "learning", ".", "we", "apply", "the", "proposed", "method", "to", "the", "summarization", "of", "radiology", "reports", ",", "where", "factual", "correctness", "is", "a", "key", "requirement", ".", "on", "two", "separate", "datasets", "collected", "from", "hospitals", ",", "we", "show", "via", "both", "automatic", "and", "human", "evaluation", "that", "the", "proposed", "approach", "substantially", "improves", "the", "factual", "correctness", "and", "overall", "quality", "of", "outputs", 
"over", "a", "competitive", "neural", "summarization", "system", ",", "producing", "radiology", "summaries", "that", "approach", "the", "quality", "of", "human", "-", "authored", "ones", "."]}, {"venue": "ACL", "title": "Position Bias Mitigation: A Knowledge-Aware Graph Model for Emotion Cause Extraction", "abstract": "The Emotion Cause Extraction (ECE) task aims to identify clauses which contain emotion-evoking information for a particular emotion expressed in text. We observe that a widely-used ECE dataset exhibits a bias that the majority of annotated cause clauses are either directly before their associated emotion clauses or are the emotion clauses themselves. Existing models for ECE tend to explore such relative position information and suffer from the dataset bias. To investigate the degree of reliance of existing ECE models on clause relative positions, we propose a novel strategy to generate adversarial examples in which the relative position information is no longer the indicative feature of cause clauses. We test the performance of existing models on such adversarial examples and observe a significant performance drop. To address the dataset bias, we propose a novel graph-based method to explicitly model the emotion triggering paths by leveraging the commonsense knowledge to enhance the semantic dependencies between a candidate clause and an emotion clause. 
Experimental results show that our proposed approach performs on par with the existing state-of-the-art methods on the original ECE dataset, and is more robust against adversarial attacks compared to existing models.", "doc_id": "83d2772e7c2ad6666eedf4bf9d778372", "publication_year": 2021, "sentences": ["the emotion cause extraction ( ece ) task aims to identify clauses which contain emotion - evoking information for a particular emotion expressed in text .", "we observe that a widely - used ece dataset exhibits a bias that the majority of annotated cause clauses are either directly before their associated emotion clauses or are the emotion clauses themselves .", "existing models for ece tend to explore such relative position information and suffer from the dataset bias .", "to investigate the degree of reliance of existing ece models on clause relative positions , we propose a novel strategy to generate adversarial examples in which the relative position information is no longer the indicative feature of cause clauses .", "we test the performance of existing models on such adversarial examples and observe a significant performance drop .", "to address the dataset bias , we propose a novel graph - based method to explicitly model the emotion triggering paths by leveraging the commonsense knowledge to enhance the semantic dependencies between a candidate clause and an emotion clause .", "experimental results show that our proposed approach performs on par with the existing state - of - the - art methods on the original ece dataset , and is more robust against adversarial attacks compared to existing models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "emotion cause extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["emotion", "cause", "extraction"], "offsets": [1, 2, 3]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "dataset bias", "nugget_type": "WEA", 
"argument_type": "Fault", "tokens": ["dataset", "bias"], "offsets": [75, 76]}], "trigger": {"text": "suffer", "tokens": ["suffer"], "offsets": [72]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [93]}, {"text": "strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["strategy"], "offsets": [97]}, {"text": "investigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["investigate"], "offsets": [79]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [94]}}, {"event_type": "PUR", "arguments": [{"text": "degree of reliance of existing ece models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["degree", "of", "reliance", "of", "existing", "ece", "models"], "offsets": [81, 82, 83, 84, 85, 86, 87]}, {"text": "on clause relative positions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "clause", "relative", "positions"], "offsets": [88, 89, 90, 91]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [79]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [118]}, {"text": "performance of existing models", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["performance", "of", "existing", "models"], "offsets": [121, 122, 123, 124]}, {"text": "on such adversarial examples", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "such", "adversarial", "examples"], "offsets": [125, 126, 127, 128]}], "trigger": {"text": "test", "tokens": ["test"], "offsets": [119]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [118]}, {"text": "significant performance drop", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["significant", "performance", "drop"], "offsets": [132, 133, 134]}], "trigger": 
{"text": "observe", "tokens": ["observe"], "offsets": [130]}}, {"event_type": "PRP", "arguments": [{"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [137]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [142]}, {"text": "graph - based method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["graph", "-", "based", "method"], "offsets": [146, 147, 148, 149]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [143]}}, {"event_type": "PUR", "arguments": [{"text": "dataset bias", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["dataset", "bias"], "offsets": [139, 140]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [137]}}, {"event_type": "MDS", "arguments": [{"text": "commonsense knowledge", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["commonsense", "knowledge"], "offsets": [160, 161]}, {"text": "semantic dependencies", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["semantic", "dependencies"], "offsets": [165, 166]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [158]}}, {"event_type": "FIN", "arguments": [{"text": "performs", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["performs"], "offsets": [183]}, {"text": "more robust", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "robust"], "offsets": [205, 206]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [178]}}, {"event_type": "CMP", "arguments": [{"text": "graph - based method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["graph", "-", "based", "method"], "offsets": [146, 147, 148, 149]}, {"text": "existing state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [188, 189, 190, 191, 192, 193, 194, 195, 196]}, 
{"text": "original ece dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["original", "ece", "dataset"], "offsets": [199, 200, 201]}, {"text": "par", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["par"], "offsets": [185]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [183]}}, {"event_type": "CMP", "arguments": [{"text": "existing models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "models"], "offsets": [212, 213]}, {"text": "more robust", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "robust"], "offsets": [205, 206]}, {"text": "adversarial attacks", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["adversarial", "attacks"], "offsets": [208, 209]}], "trigger": {"text": "more robust", "tokens": ["more", "robust"], "offsets": [205, 206]}}], "document": ["the", "emotion", "cause", "extraction", "(", "ece", ")", "task", "aims", "to", "identify", "clauses", "which", "contain", "emotion", "-", "evoking", "information", "for", "a", "particular", "emotion", "expressed", "in", "text", ".", "we", "observe", "that", "a", "widely", "-", "used", "ece", "dataset", "exhibits", "a", "bias", "that", "the", "majority", "of", "annotated", "cause", "clauses", "are", "either", "directly", "before", "their", "associated", "emotion", "clauses", "or", "are", "the", "emotion", "clauses", "themselves", ".", "existing", "models", "for", "ece", "tend", "to", "explore", "such", "relative", "position", "information", "and", "suffer", "from", "the", "dataset", "bias", ".", "to", "investigate", "the", "degree", "of", "reliance", "of", "existing", "ece", "models", "on", "clause", "relative", "positions", ",", "we", "propose", "a", "novel", "strategy", "to", "generate", "adversarial", "examples", "in", "which", "the", "relative", "position", "information", "is", "no", "longer", "the", "indicative", "feature", "of", "cause", "clauses", ".", "we", "test", "the", "performance", "of", 
"existing", "models", "on", "such", "adversarial", "examples", "and", "observe", "a", "significant", "performance", "drop", ".", "to", "address", "the", "dataset", "bias", ",", "we", "propose", "a", "novel", "graph", "-", "based", "method", "to", "explicitly", "model", "the", "emotion", "triggering", "paths", "by", "leveraging", "the", "commonsense", "knowledge", "to", "enhance", "the", "semantic", "dependencies", "between", "a", "candidate", "clause", "and", "an", "emotion", "clause", ".", "experimental", "results", "show", "that", "our", "proposed", "approach", "performs", "on", "par", "with", "the", "existing", "state", "-", "of", "-", "the", "-", "art", "methods", "on", "the", "original", "ece", "dataset", ",", "and", "is", "more", "robust", "against", "adversarial", "attacks", "compared", "to", "existing", "models", "."]}, {"venue": "ACL", "title": "Representation Learning for Information Extraction from Form-like Documents", "abstract": "We propose a novel approach using representation learning for tackling the problem of extracting structured information from form-like document images. We propose an extraction system that uses knowledge of the types of the target fields to generate extraction candidates and a neural network architecture that learns a dense representation of each candidate based on neighboring words in the document. 
These learned representations are not only useful in solving the extraction task for unseen document templates from two different domains but are also interpretable, as we show using loss cases.", "doc_id": "494cfb3af3fee93652ed1853c58add46", "publication_year": 2020, "sentences": ["we propose a novel approach using representation learning for tackling the problem of extracting structured information from form - like document images .", "we propose an extraction system that uses knowledge of the types of the target fields to generate extraction candidates and a neural network architecture that learns a dense representation of each candidate based on neighboring words in the document .", "these learned representations are not only useful in solving the extraction task for unseen document templates from two different domains but are also interpretable , as we show using loss cases ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [4]}, {"text": "tackling", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["tackling"], "offsets": [9]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "problem of extracting structured information from form - like document images", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["problem", "of", "extracting", "structured", "information", "from", "form", "-", "like", "document", "images"], "offsets": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]}], "trigger": {"text": "tackling", "tokens": ["tackling"], "offsets": [9]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [23]}, {"text": "extraction system", "nugget_type": "APP", "argument_type": "Content", "tokens": 
["extraction", "system"], "offsets": [26, 27]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [39]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [24]}}, {"event_type": "PUR", "arguments": [{"text": "extraction candidates", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["extraction", "candidates"], "offsets": [40, 41]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [39]}}], "document": ["we", "propose", "a", "novel", "approach", "using", "representation", "learning", "for", "tackling", "the", "problem", "of", "extracting", "structured", "information", "from", "form", "-", "like", "document", "images", ".", "we", "propose", "an", "extraction", "system", "that", "uses", "knowledge", "of", "the", "types", "of", "the", "target", "fields", "to", "generate", "extraction", "candidates", "and", "a", "neural", "network", "architecture", "that", "learns", "a", "dense", "representation", "of", "each", "candidate", "based", "on", "neighboring", "words", "in", "the", "document", ".", "these", "learned", "representations", "are", "not", "only", "useful", "in", "solving", "the", "extraction", "task", "for", "unseen", "document", "templates", "from", "two", "different", "domains", "but", "are", "also", "interpretable", ",", "as", "we", "show", "using", "loss", "cases", "."]}, {"venue": "ACL", "title": "Learning to Understand Child-directed and Adult-directed Speech", "abstract": "Speech directed to children differs from adult-directed speech in linguistic aspects such as repetition, word choice, and sentence length, as well as in aspects of the speech signal itself, such as prosodic and phonemic variation. Human language acquisition research indicates that child-directed speech helps language learners. This study explores the effect of child-directed speech when learning to extract semantic information from speech directly. 
We compare the task performance of models trained on adult-directed speech (ADS) and child-directed speech (CDS). We find indications that CDS helps in the initial stages of learning, but eventually, models trained on ADS reach comparable task performance, and generalize better. The results suggest that this is at least partially due to linguistic rather than acoustic properties of the two registers, as we see the same pattern when looking at models trained on acoustically comparable synthetic speech.", "doc_id": "b964ec47972539b3f5fb378030233a07", "publication_year": 2020, "sentences": ["speech directed to children differs from adult - directed speech in linguistic aspects such as repetition , word choice , and sentence length , as well as in aspects of the speech signal itself , such as prosodic and phonemic variation .", "human language acquisition research indicates that child - directed speech helps language learners .", "this study explores the effect of child - directed speech when learning to extract semantic information from speech directly .", "we compare the task performance of models trained on adult - directed speech ( ads ) and child - directed speech ( cds ) .", "we find indications that cds helps in the initial stages of learning , but eventually , models trained on ads reach comparable task performance , and generalize better .", "the results suggest that this is at least partially due to linguistic rather than acoustic properties of the two registers , as we see the same pattern when looking at models trained on acoustically comparable synthetic speech ."], "events": [{"event_type": "ITT", "arguments": [{"text": "child - directed speech", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["child", "-", "directed", "speech"], "offsets": [48, 49, 50, 51]}], "trigger": {"text": "helps", "tokens": ["helps"], "offsets": [52]}}, {"event_type": "WKS", "arguments": [{"text": "when learning to extract semantic information from speech directly", 
"nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "learning", "to", "extract", "semantic", "information", "from", "speech", "directly"], "offsets": [66, 67, 68, 69, 70, 71, 72, 73, 74]}, {"text": "effect of child - directed speech", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["effect", "of", "child", "-", "directed", "speech"], "offsets": [60, 61, 62, 63, 64, 65]}], "trigger": {"text": "explores", "tokens": ["explores"], "offsets": [58]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [76]}, {"text": "task performance of models", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["task", "performance", "of", "models"], "offsets": [79, 80, 81, 82]}, {"text": "trained on adult - directed speech", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["trained", "on", "adult", "-", "directed", "speech"], "offsets": [83, 84, 85, 86, 87, 88]}, {"text": "child - directed speech", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["child", "-", "directed", "speech"], "offsets": [93, 94, 95, 96]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [77]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [101]}, {"text": "helps", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["helps"], "offsets": [106]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [102]}}, {"event_type": "FAC", "arguments": [{"text": "cds", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["child", "-", "directed", "speech"], "offsets": [93, 94, 95, 96]}, {"text": "in the initial stages of learning", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "initial", "stages", "of", "learning"], "offsets": [107, 108, 109, 110, 111, 112]}], "trigger": {"text": "helps", "tokens": ["helps"], "offsets": 
[106]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [101]}, {"text": "reach", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["reach"], "offsets": [121]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [102]}}, {"event_type": "FAC", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["models"], "offsets": [117]}, {"text": "comparable task performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["comparable", "task", "performance"], "offsets": [122, 123, 124]}, {"text": "trained on ads", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["trained", "on", "adult", "-", "directed", "speech"], "offsets": [118, 119, 85, 86, 87, 88]}], "trigger": {"text": "reach", "tokens": ["reach"], "offsets": [121]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [101]}, {"text": "generalize", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["generalize"], "offsets": [127]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [102]}}, {"event_type": "CMP", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["models"], "offsets": [117]}, {"text": "trained on ads", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["trained", "on", "adult", "-", "directed", "speech"], "offsets": [118, 119, 85, 86, 87, 88]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [128]}], "trigger": {"text": "generalize", "tokens": ["generalize"], "offsets": [127]}}], "document": ["speech", "directed", "to", "children", "differs", "from", "adult", "-", "directed", "speech", "in", "linguistic", "aspects", "such", "as", "repetition", ",", "word", "choice", ",", "and", "sentence", "length", ",", "as", "well", "as", "in", 
"aspects", "of", "the", "speech", "signal", "itself", ",", "such", "as", "prosodic", "and", "phonemic", "variation", ".", "human", "language", "acquisition", "research", "indicates", "that", "child", "-", "directed", "speech", "helps", "language", "learners", ".", "this", "study", "explores", "the", "effect", "of", "child", "-", "directed", "speech", "when", "learning", "to", "extract", "semantic", "information", "from", "speech", "directly", ".", "we", "compare", "the", "task", "performance", "of", "models", "trained", "on", "adult", "-", "directed", "speech", "(", "ads", ")", "and", "child", "-", "directed", "speech", "(", "cds", ")", ".", "we", "find", "indications", "that", "cds", "helps", "in", "the", "initial", "stages", "of", "learning", ",", "but", "eventually", ",", "models", "trained", "on", "ads", "reach", "comparable", "task", "performance", ",", "and", "generalize", "better", ".", "the", "results", "suggest", "that", "this", "is", "at", "least", "partially", "due", "to", "linguistic", "rather", "than", "acoustic", "properties", "of", "the", "two", "registers", ",", "as", "we", "see", "the", "same", "pattern", "when", "looking", "at", "models", "trained", "on", "acoustically", "comparable", "synthetic", "speech", "."]}, {"venue": "ACL", "title": "A Semi-Markov Structured Support Vector Machine Model for High-Precision Named Entity Recognition", "abstract": "Named entity recognition (NER) is the backbone of many NLP solutions. F1 score, the harmonic mean of precision and recall, is often used to select/evaluate the best models. However, when precision needs to be prioritized over recall, a state-of-the-art model might not be the best choice. There is little in literature that directly addresses training-time modifications to achieve higher precision information extraction. 
In this paper, we propose a neural semi-Markov structured support vector machine model that controls the precision-recall trade-off by assigning weights to different types of errors in the loss-augmented inference during training. The semi-Markov property provides more accurate phrase-level predictions, thereby improving performance. We empirically demonstrate the advantage of our model when high precision is required by comparing against strong baselines based on CRF. In our experiments with the CoNLL 2003 dataset, our model achieves a better precision-recall trade-off at various precision levels.", "doc_id": "2a2e666b1f728373a57b3957deeec596", "publication_year": 2019, "sentences": ["named entity recognition ( ner ) is the backbone of many nlp solutions .", "f1 score , the harmonic mean of precision and recall , is often used to select / evaluate the best models .", "however , when precision needs to be prioritized over recall , a state - of - the - art model might not be the best choice .", "there is little in literature that directly addresses training - time modifications to achieve higher precision information extraction .", "in this paper , we propose a neural semi - markov structured support vector machine model that controls the precision - recall trade - off by assigning weights to different types of errors in the loss - augmented inference during training .", "the semi - markov property provides more accurate phrase - level predictions , thereby improving performance .", "we empirically demonstrate the advantage of our model when high precision is required by comparing against strong baselines based on crf .", "in our experiments with the conll 2003 dataset , our model achieves a better precision - recall trade - off at various precision levels ."], "events": [{"event_type": "ITT", "arguments": [{"text": "named entity recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["named", "entity", "recognition"], "offsets": [0, 1, 2]}], 
"trigger": {"text": "backbone", "tokens": ["backbone"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "achieve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["achieve"], "offsets": [76]}, {"text": "training - time modifications", "nugget_type": "MOD", "argument_type": "Fault", "tokens": ["training", "-", "time", "modifications"], "offsets": [71, 72, 73, 74]}, {"text": "little", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["little"], "offsets": [65]}], "trigger": {"text": "directly addresses", "tokens": ["directly", "addresses"], "offsets": [69, 70]}}, {"event_type": "PUR", "arguments": [{"text": "higher precision information extraction", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["higher", "precision", "information", "extraction"], "offsets": [77, 78, 79, 80]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [76]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [86]}, {"text": "neural semi - markov structured support vector machine model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "semi", "-", "markov", "structured", "support", "vector", "machine", "model"], "offsets": [89, 90, 91, 92, 93, 94, 95, 96, 97]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [87]}}, {"event_type": "MDS", "arguments": [{"text": "controls", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["controls"], "offsets": [99]}, {"text": "in the loss - augmented inference during training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "loss", "-", "augmented", "inference", "during", "training"], "offsets": [115, 116, 117, 118, 119, 120, 121, 122]}, {"text": "weights", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["weights"], "offsets": [109]}, {"text": "different types of errors", "nugget_type": "FEA", "argument_type": 
"BaseComponent", "tokens": ["different", "types", "of", "errors"], "offsets": [111, 112, 113, 114]}], "trigger": {"text": "assigning", "tokens": ["assigning"], "offsets": [108]}}, {"event_type": "PUR", "arguments": [{"text": "precision - recall trade - off", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["precision", "-", "recall", "trade", "-", "off"], "offsets": [101, 102, 103, 104, 105, 106]}], "trigger": {"text": "controls", "tokens": ["controls"], "offsets": [99]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [141]}, {"text": "comparing", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["comparing"], "offsets": [155]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [143]}}, {"event_type": "CMP", "arguments": [{"text": "strong baselines based on crf", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines", "based", "on", "crf"], "offsets": [157, 158, 159, 160, 161]}, {"text": "advantage", "nugget_type": "STR", "argument_type": "Result", "tokens": ["advantage"], "offsets": [145]}, {"text": "neural semi - markov structured support vector machine model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["neural", "semi", "-", "markov", "structured", "support", "vector", "machine", "model"], "offsets": [89, 90, 91, 92, 93, 94, 95, 96, 97]}, {"text": "when high precision is required", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "high", "precision", "is", "required"], "offsets": [149, 150, 151, 152, 153]}], "trigger": {"text": "comparing", "tokens": ["comparing"], "offsets": [155]}}, {"event_type": "FAC", "arguments": [{"text": "neural semi - markov structured support vector machine model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["neural", "semi", "-", "markov", "structured", "support", "vector", "machine", "model"], "offsets": [89, 90, 91, 92, 93, 94, 
95, 96, 97]}, {"text": "at various precision levels", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "various", "precision", "levels"], "offsets": [183, 184, 185, 186]}, {"text": "conll 2003 dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["conll", "2003", "dataset"], "offsets": [168, 169, 170]}, {"text": "better precision - recall trade - off", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["better", "precision", "-", "recall", "trade", "-", "off"], "offsets": [176, 177, 178, 179, 180, 181, 182]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [174]}}], "document": ["named", "entity", "recognition", "(", "ner", ")", "is", "the", "backbone", "of", "many", "nlp", "solutions", ".", "f1", "score", ",", "the", "harmonic", "mean", "of", "precision", "and", "recall", ",", "is", "often", "used", "to", "select", "/", "evaluate", "the", "best", "models", ".", "however", ",", "when", "precision", "needs", "to", "be", "prioritized", "over", "recall", ",", "a", "state", "-", "of", "-", "the", "-", "art", "model", "might", "not", "be", "the", "best", "choice", ".", "there", "is", "little", "in", "literature", "that", "directly", "addresses", "training", "-", "time", "modifications", "to", "achieve", "higher", "precision", "information", "extraction", ".", "in", "this", "paper", ",", "we", "propose", "a", "neural", "semi", "-", "markov", "structured", "support", "vector", "machine", "model", "that", "controls", "the", "precision", "-", "recall", "trade", "-", "off", "by", "assigning", "weights", "to", "different", "types", "of", "errors", "in", "the", "loss", "-", "augmented", "inference", "during", "training", ".", "the", "semi", "-", "markov", "property", "provides", "more", "accurate", "phrase", "-", "level", "predictions", ",", "thereby", "improving", "performance", ".", "we", "empirically", "demonstrate", "the", "advantage", "of", "our", "model", "when", "high", "precision", "is", "required", 
"by", "comparing", "against", "strong", "baselines", "based", "on", "crf", ".", "in", "our", "experiments", "with", "the", "conll", "2003", "dataset", ",", "our", "model", "achieves", "a", "better", "precision", "-", "recall", "trade", "-", "off", "at", "various", "precision", "levels", "."]}, {"venue": "ACL", "title": "Pyramid: A Layered Model for Nested Named Entity Recognition", "abstract": "This paper presents Pyramid, a novel layered model for Nested Named Entity Recognition (nested NER). In our approach, token or text region embeddings are recursively inputted into L flat NER layers, from bottom to top, stacked in a pyramid shape. Each time an embedding passes through a layer of the pyramid, its length is reduced by one. Its hidden state at layer l represents an l-gram in the input text, which is labeled only if its corresponding text region represents a complete entity mention. We also design an inverse pyramid to allow bidirectional interaction between layers. The proposed method achieves state-of-the-art F1 scores in nested NER on ACE-2004, ACE-2005, GENIA, and NNE, which are 80.27, 79.42, 77.78, and 93.70 with conventional embeddings, and 87.74, 86.34, 79.31, and 94.68 with pre-trained contextualized embeddings. In addition, our model can be used for the more general task of Overlapping Named Entity Recognition. 
A preliminary experiment confirms the effectiveness of our method in overlapping NER.", "doc_id": "6353b8a9b8a4d09a9b9c9c05f2707351", "publication_year": 2020, "sentences": ["this paper presents pyramid , a novel layered model for nested named entity recognition ( nested ner ) .", "in our approach , token or text region embeddings are recursively inputted into l flat ner layers , from bottom to top , stacked in a pyramid shape .", "each time an embedding passes through a layer of the pyramid , its length is reduced by one .", "its hidden state at layer l represents an l - gram in the input text , which is labeled only if its corresponding text region represents a complete entity mention .", "we also design an inverse pyramid to allow bidirectional interaction between layers .", "the proposed method achieves state - of - the - art f1 scores in nested ner on ace - 2004 , ace - 2005 , genia , and nne , which are 80 . 27 , 79 . 42 , 77 . 78 , and 93 . 70 with conventional embeddings , and 87 . 74 , 86 . 34 , 79 . 31 , and 94 . 
68 with pre - trained contextualized embeddings .", "in addition , our model can be used for the more general task of overlapping named entity recognition .", "a preliminary experiment confirms the effectiveness of our method in overlapping ner ."], "events": [{"event_type": "PRP", "arguments": [{"text": "pyramid", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pyramid"], "offsets": [3]}, {"text": "nested named entity recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nested", "named", "entity", "recognition"], "offsets": [10, 11, 12, 13]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [2]}}, {"event_type": "MDS", "arguments": [{"text": "l flat ner layers", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["l", "flat", "ner", "layers"], "offsets": [32, 33, 34, 35]}, {"text": "token", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["token"], "offsets": [23]}, {"text": "text region embeddings", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["text", "region", "embeddings"], "offsets": [25, 26, 27]}], "trigger": {"text": "recursively inputted", "tokens": ["recursively", "inputted"], "offsets": [29, 30]}}, {"event_type": "MDS", "arguments": [{"text": "embedding", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["embedding"], "offsets": [51]}, {"text": "only if its corresponding text region represents a complete entity mention", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["only", "if", "its", "corresponding", "text", "region", "represents", "a", "complete", "entity", "mention"], "offsets": [86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96]}], "trigger": {"text": "labeled", "tokens": ["labeled"], "offsets": [85]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [98]}, {"text": "allow", "nugget_type": "E-PUR", "argument_type": "Target", 
"tokens": ["allow"], "offsets": [105]}, {"text": "inverse pyramid", "nugget_type": "APP", "argument_type": "Content", "tokens": ["inverse", "pyramid"], "offsets": [102, 103]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [100]}}, {"event_type": "PUR", "arguments": [{"text": "bidirectional interaction between layers", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["bidirectional", "interaction", "between", "layers"], "offsets": [106, 107, 108, 109]}], "trigger": {"text": "allow", "tokens": ["allow"], "offsets": [105]}}, {"event_type": "CMP", "arguments": [{"text": "pyramid", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pyramid"], "offsets": [3]}, {"text": "state - of - the - art", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [115, 116, 117, 118, 119, 120, 121]}, {"text": "f1 scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "scores"], "offsets": [122, 123]}, {"text": "with conventional embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "conventional", "embeddings"], "offsets": [159, 160, 161]}, {"text": "80 . 
27", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["80", ".", "27"], "offsets": [143, 144, 145]}, {"text": "in nested ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nested", "ner"], "offsets": [124, 125, 126]}, {"text": "ace - 2004", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ace", "-", "2004"], "offsets": [128, 129, 130]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "pyramid", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pyramid"], "offsets": [3]}, {"text": "state - of - the - art", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [115, 116, 117, 118, 119, 120, 121]}, {"text": "f1 scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "scores"], "offsets": [122, 123]}, {"text": "79 . 42", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["79", ".", "42"], "offsets": [147, 148, 149]}, {"text": "with conventional embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "conventional", "embeddings"], "offsets": [159, 160, 161]}, {"text": "ace - 2005", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ace", "-", "2005"], "offsets": [132, 133, 134]}, {"text": "in nested ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nested", "ner"], "offsets": [124, 125, 126]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "pyramid", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pyramid"], "offsets": [3]}, {"text": "state - of - the - art", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [115, 116, 117, 118, 119, 120, 121]}, {"text": "f1 scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", 
"scores"], "offsets": [122, 123]}, {"text": "77 . 78", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["77", ".", "78"], "offsets": [151, 152, 153]}, {"text": "with conventional embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "conventional", "embeddings"], "offsets": [159, 160, 161]}, {"text": "genia", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["genia"], "offsets": [136]}, {"text": "in nested ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nested", "ner"], "offsets": [124, 125, 126]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "pyramid", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pyramid"], "offsets": [3]}, {"text": "state - of - the - art", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [115, 116, 117, 118, 119, 120, 121]}, {"text": "f1 scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "scores"], "offsets": [122, 123]}, {"text": "93 . 
70", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["93", ".", "70"], "offsets": [156, 157, 158]}, {"text": "with conventional embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "conventional", "embeddings"], "offsets": [159, 160, 161]}, {"text": "nne", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["nne"], "offsets": [139]}, {"text": "in nested ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nested", "ner"], "offsets": [124, 125, 126]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "pyramid", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pyramid"], "offsets": [3]}, {"text": "state - of - the - art", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [115, 116, 117, 118, 119, 120, 121]}, {"text": "f1 scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "scores"], "offsets": [122, 123]}, {"text": "87 . 
74", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["87", ".", "74"], "offsets": [164, 165, 166]}, {"text": "with pre - trained contextualized embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "pre", "-", "trained", "contextualized", "embeddings"], "offsets": [180, 181, 182, 183, 184, 185]}, {"text": "in nested ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nested", "ner"], "offsets": [124, 125, 126]}, {"text": "ace - 2004", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ace", "-", "2004"], "offsets": [128, 129, 130]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "pyramid", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pyramid"], "offsets": [3]}, {"text": "state - of - the - art", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [115, 116, 117, 118, 119, 120, 121]}, {"text": "f1 scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "scores"], "offsets": [122, 123]}, {"text": "86 . 
34", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["86", ".", "34"], "offsets": [168, 169, 170]}, {"text": "with pre - trained contextualized embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "pre", "-", "trained", "contextualized", "embeddings"], "offsets": [180, 181, 182, 183, 184, 185]}, {"text": "ace - 2005", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ace", "-", "2005"], "offsets": [132, 133, 134]}, {"text": "in nested ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nested", "ner"], "offsets": [124, 125, 126]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "pyramid", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pyramid"], "offsets": [3]}, {"text": "state - of - the - art", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [115, 116, 117, 118, 119, 120, 121]}, {"text": "f1 scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "scores"], "offsets": [122, 123]}, {"text": "79 . 
31", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["79", ".", "31"], "offsets": [172, 173, 174]}, {"text": "with pre - trained contextualized embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "pre", "-", "trained", "contextualized", "embeddings"], "offsets": [180, 181, 182, 183, 184, 185]}, {"text": "in nested ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nested", "ner"], "offsets": [124, 125, 126]}, {"text": "genia", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["genia"], "offsets": [136]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "pyramid", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pyramid"], "offsets": [3]}, {"text": "state - of - the - art", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [115, 116, 117, 118, 119, 120, 121]}, {"text": "f1 scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "scores"], "offsets": [122, 123]}, {"text": "94 . 
68", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["94", ".", "68"], "offsets": [177, 178, 179]}, {"text": "with pre - trained contextualized embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "pre", "-", "trained", "contextualized", "embeddings"], "offsets": [180, 181, 182, 183, 184, 185]}, {"text": "in nested ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nested", "ner"], "offsets": [124, 125, 126]}, {"text": "nne", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["nne"], "offsets": [139]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [114]}}, {"event_type": "FAC", "arguments": [{"text": "in overlapping ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "overlapping", "ner"], "offsets": [215, 216, 217]}, {"text": "pyramid", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pyramid"], "offsets": [3]}, {"text": "effectiveness", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["effectiveness"], "offsets": [211]}], "trigger": {"text": "confirms", "tokens": ["confirms"], "offsets": [209]}}], "document": ["this", "paper", "presents", "pyramid", ",", "a", "novel", "layered", "model", "for", "nested", "named", "entity", "recognition", "(", "nested", "ner", ")", ".", "in", "our", "approach", ",", "token", "or", "text", "region", "embeddings", "are", "recursively", "inputted", "into", "l", "flat", "ner", "layers", ",", "from", "bottom", "to", "top", ",", "stacked", "in", "a", "pyramid", "shape", ".", "each", "time", "an", "embedding", "passes", "through", "a", "layer", "of", "the", "pyramid", ",", "its", "length", "is", "reduced", "by", "one", ".", "its", "hidden", "state", "at", "layer", "l", "represents", "an", "l", "-", "gram", "in", "the", "input", "text", ",", "which", "is", "labeled", "only", "if", "its", "corresponding", "text", "region", "represents", "a", "complete", "entity", "mention", ".", "we", "also", 
"design", "an", "inverse", "pyramid", "to", "allow", "bidirectional", "interaction", "between", "layers", ".", "the", "proposed", "method", "achieves", "state", "-", "of", "-", "the", "-", "art", "f1", "scores", "in", "nested", "ner", "on", "ace", "-", "2004", ",", "ace", "-", "2005", ",", "genia", ",", "and", "nne", ",", "which", "are", "80", ".", "27", ",", "79", ".", "42", ",", "77", ".", "78", ",", "and", "93", ".", "70", "with", "conventional", "embeddings", ",", "and", "87", ".", "74", ",", "86", ".", "34", ",", "79", ".", "31", ",", "and", "94", ".", "68", "with", "pre", "-", "trained", "contextualized", "embeddings", ".", "in", "addition", ",", "our", "model", "can", "be", "used", "for", "the", "more", "general", "task", "of", "overlapping", "named", "entity", "recognition", ".", "a", "preliminary", "experiment", "confirms", "the", "effectiveness", "of", "our", "method", "in", "overlapping", "ner", "."]}, {"venue": "ACL", "title": "PASS: Perturb-and-Select Summarizer for Product Reviews", "abstract": "The product reviews summarization task aims to automatically produce a short summary for a set of reviews of a given product. Such summaries are expected to aggregate a range of different opinions in a concise, coherent and informative manner. This challenging task gives rise to two shortcomings in existing work. First, summarizers tend to favor generic content that appears in reviews for many different products, resulting in template-like, less informative summaries. Second, as reviewers often disagree on the pros and cons of a given product, summarizers sometimes yield inconsistent, self-contradicting summaries. We propose the PASS system (Perturb-and-Select Summarizer) that employs a large pre-trained Transformer-based model (T5 in our case), which follows a few-shot fine-tuning scheme. 
A key component of the PASS system relies on applying systematic perturbations to the model\u2019s input during inference, which allows it to generate multiple different summaries per product. We develop a method for ranking these summaries according to desired criteria, coherence in our case, enabling our system to almost entirely avoid the problem of self-contradiction. We compare our system against strong baselines on publicly available datasets, and show that it produces summaries which are more informative, diverse and coherent.", "doc_id": "875fa8ff3bef8e1e5c951cca55f1d041", "publication_year": 2021, "sentences": ["the product reviews summarization task aims to automatically produce a short summary for a set of reviews of a given product .", "such summaries are expected to aggregate a range of different opinions in a concise , coherent and informative manner .", "this challenging task gives rise to two shortcomings in existing work .", "first , summarizers tend to favor generic content that appears in reviews for many different products , resulting in template - like , less informative summaries .", "second , as reviewers often disagree on the pros and cons of a given product , summarizers sometimes yield inconsistent , self - contradicting summaries .", "we propose the pass system ( perturb - and - select summarizer ) that employs a large pre - trained transformer - based model ( t5 in our case ) , which follows a few - shot fine - tuning scheme .", "a key component of the pass system relies on applying systematic perturbations to the model \u2019 s input during inference , which allows it to generate multiple different summaries per product .", "we develop a method for ranking these summaries according to desired criteria , coherence in our case , enabling our system to almost entirely avoid the problem of self - contradiction .", "we compare our system against strong baselines on publicly available datasets , and show that it produces summaries which are 
more informative , diverse and coherent ."], "events": [{"event_type": "ITT", "arguments": [{"text": "product reviews summarization task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["product", "reviews", "summarization", "task"], "offsets": [1, 2, 3, 4]}], "trigger": {"text": "automatically produce", "tokens": ["automatically", "produce"], "offsets": [7, 8]}}, {"event_type": "RWF", "arguments": [{"text": "less informative", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["less", "informative"], "offsets": [77, 78]}, {"text": "summaries", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["summaries"], "offsets": [79]}], "trigger": {"text": "resulting", "tokens": ["resulting"], "offsets": [71]}}, {"event_type": "RWF", "arguments": [{"text": "summaries", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["summaries"], "offsets": [105]}, {"text": "inconsistent", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["inconsistent"], "offsets": [100]}, {"text": "self - contradicting", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["self", "-", "contradicting"], "offsets": [102, 103, 104]}], "trigger": {"text": "yield", "tokens": ["yield"], "offsets": [99]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [107]}, {"text": "pass system", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pass", "system"], "offsets": [110, 111]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [108]}}, {"event_type": "WKS", "arguments": [{"text": "large pre - trained transformer - based model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["large", "pre", "-", "trained", "transformer", "-", "based", "model"], "offsets": [123, 124, 125, 126, 127, 128, 129, 130]}], "trigger": {"text": "employs", "tokens": ["employs"], "offsets": [121]}}, {"event_type": "WKS", "arguments": [{"text": "few - shot fine - 
tuning scheme", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["few", "-", "shot", "fine", "-", "tuning", "scheme"], "offsets": [141, 142, 143, 144, 145, 146, 147]}], "trigger": {"text": "follows", "tokens": ["follows"], "offsets": [139]}}, {"event_type": "WKS", "arguments": [{"text": "systematic perturbations to the model \u2019 s input", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["systematic", "perturbations", "to", "the", "model", "\u2019", "s", "input"], "offsets": [159, 160, 161, 162, 163, 164, 165, 166]}, {"text": "during inference", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "inference"], "offsets": [167, 168]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [174]}, {"text": "per product", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["per", "product"], "offsets": [178, 179]}], "trigger": {"text": "applying", "tokens": ["applying"], "offsets": [158]}}, {"event_type": "PUR", "arguments": [{"text": "multiple different summaries", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["multiple", "different", "summaries"], "offsets": [175, 176, 177]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [174]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [181]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [184]}, {"text": "ranking", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["ranking"], "offsets": [186]}, {"text": "coherence", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["coherence"], "offsets": [194]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [182]}}, {"event_type": "PUR", "arguments": [{"text": "according to desired criteria", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["according", "to", 
"desired", "criteria"], "offsets": [189, 190, 191, 192]}, {"text": "multiple different summaries", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["multiple", "different", "summaries"], "offsets": [175, 176, 177]}], "trigger": {"text": "ranking", "tokens": ["ranking"], "offsets": [186]}}, {"event_type": "FAC", "arguments": [{"text": "pass system", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pass", "system"], "offsets": [110, 111]}, {"text": "almost entirely", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["almost", "entirely"], "offsets": [203, 204]}, {"text": "problem of self - contradiction", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["problem", "of", "self", "-", "contradiction"], "offsets": [207, 208, 209, 210, 211]}], "trigger": {"text": "avoid", "tokens": ["avoid"], "offsets": [205]}}, {"event_type": "CMP", "arguments": [{"text": "pass system", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pass", "system"], "offsets": [110, 111]}, {"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [218, 219]}, {"text": "publicly available datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["publicly", "available", "datasets"], "offsets": [221, 222, 223]}, {"text": "produces summaries", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["produces", "summaries"], "offsets": [229, 230]}, {"text": "more", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more"], "offsets": [233]}], "trigger": {"text": "informative , diverse and coherent", "tokens": ["informative", ",", "diverse", "and", "coherent"], "offsets": [234, 235, 236, 237, 238]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [213]}, {"text": "informative , diverse and coherent", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["informative", 
"diverse", "and", "coherent"], "offsets": [234, 236, 237, 238]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [226]}}], "document": ["the", "product", "reviews", "summarization", "task", "aims", "to", "automatically", "produce", "a", "short", "summary", "for", "a", "set", "of", "reviews", "of", "a", "given", "product", ".", "such", "summaries", "are", "expected", "to", "aggregate", "a", "range", "of", "different", "opinions", "in", "a", "concise", ",", "coherent", "and", "informative", "manner", ".", "this", "challenging", "task", "gives", "rise", "to", "two", "shortcomings", "in", "existing", "work", ".", "first", ",", "summarizers", "tend", "to", "favor", "generic", "content", "that", "appears", "in", "reviews", "for", "many", "different", "products", ",", "resulting", "in", "template", "-", "like", ",", "less", "informative", "summaries", ".", "second", ",", "as", "reviewers", "often", "disagree", "on", "the", "pros", "and", "cons", "of", "a", "given", "product", ",", "summarizers", "sometimes", "yield", "inconsistent", ",", "self", "-", "contradicting", "summaries", ".", "we", "propose", "the", "pass", "system", "(", "perturb", "-", "and", "-", "select", "summarizer", ")", "that", "employs", "a", "large", "pre", "-", "trained", "transformer", "-", "based", "model", "(", "t5", "in", "our", "case", ")", ",", "which", "follows", "a", "few", "-", "shot", "fine", "-", "tuning", "scheme", ".", "a", "key", "component", "of", "the", "pass", "system", "relies", "on", "applying", "systematic", "perturbations", "to", "the", "model", "\u2019", "s", "input", "during", "inference", ",", "which", "allows", "it", "to", "generate", "multiple", "different", "summaries", "per", "product", ".", "we", "develop", "a", "method", "for", "ranking", "these", "summaries", "according", "to", "desired", "criteria", ",", "coherence", "in", "our", "case", ",", "enabling", "our", "system", "to", "almost", "entirely", "avoid", "the", "problem", "of", "self", "-", 
"contradiction", ".", "we", "compare", "our", "system", "against", "strong", "baselines", "on", "publicly", "available", "datasets", ",", "and", "show", "that", "it", "produces", "summaries", "which", "are", "more", "informative", ",", "diverse", "and", "coherent", "."]}, {"venue": "ACL", "title": "Neural Machine Translation with Reordering Embeddings", "abstract": "The reordering model plays an important role in phrase-based statistical machine translation. However, there are few works that exploit the reordering information in neural machine translation. In this paper, we propose a reordering mechanism to learn the reordering embedding of a word based on its contextual information. These learned reordering embeddings are stacked together with self-attention networks to learn sentence representation for machine translation. The reordering mechanism can be easily integrated into both the encoder and the decoder in the Transformer translation system. Experimental results on WMT\u201914 English-to-German, NIST Chinese-to-English, and WAT Japanese-to-English translation tasks demonstrate that the proposed methods can significantly improve the performance of the Transformer.", "doc_id": "33463835bc05e51c98c465de2017d98a", "publication_year": 2019, "sentences": ["the reordering model plays an important role in phrase - based statistical machine translation .", "however , there are few works that exploit the reordering information in neural machine translation .", "in this paper , we propose a reordering mechanism to learn the reordering embedding of a word based on its contextual information .", "these learned reordering embeddings are stacked together with self - attention networks to learn sentence representation for machine translation .", "the reordering mechanism can be easily integrated into both the encoder and the decoder in the transformer translation system .", "experimental results on wmt \u2019 14 english - to - german , nist chinese - to - english , and wat 
japanese - to - english translation tasks demonstrate that the proposed methods can significantly improve the performance of the transformer ."], "events": [{"event_type": "ITT", "arguments": [{"text": "in phrase - based statistical machine translation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "phrase", "-", "based", "statistical", "machine", "translation"], "offsets": [7, 8, 9, 10, 11, 12, 13]}], "trigger": {"text": "plays", "tokens": ["plays"], "offsets": [3]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [35]}, {"text": "reordering mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["reordering", "mechanism"], "offsets": [38, 39]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [41]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [36]}}, {"event_type": "PUR", "arguments": [{"text": "reordering embedding of a word based on its contextual information", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["reordering", "embedding", "of", "a", "word", "based", "on", "its", "contextual", "information"], "offsets": [43, 44, 45, 46, 47, 48, 49, 50, 51, 52]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [41]}}, {"event_type": "MDS", "arguments": [{"text": "self - attention networks", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["self", "-", "attention", "networks"], "offsets": [62, 63, 64, 65]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [67]}, {"text": "learned reordering embeddings", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["learned", "reordering", "embeddings"], "offsets": [55, 56, 57]}], "trigger": {"text": "stacked together", "tokens": ["stacked", "together"], "offsets": [59, 60]}}, {"event_type": "PUR", "arguments": [{"text": 
"sentence representation for machine translation", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["sentence", "representation", "for", "machine", "translation"], "offsets": [68, 69, 70, 71, 72]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "reordering mechanism", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["reordering", "mechanism"], "offsets": [75, 76]}, {"text": "encoder", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["encoder"], "offsets": [84]}, {"text": "decoder", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["decoder"], "offsets": [87]}, {"text": "in the transformer translation system", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "transformer", "translation", "system"], "offsets": [88, 89, 90, 91, 92]}], "trigger": {"text": "easily integrated", "tokens": ["easily", "integrated"], "offsets": [79, 80]}}, {"event_type": "CMP", "arguments": [{"text": "reordering mechanism", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["reordering", "mechanism"], "offsets": [38, 39]}, {"text": "significantly improve", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "improve"], "offsets": [128, 129]}], "trigger": {"text": "significantly improve", "tokens": ["significantly", "improve"], "offsets": [128, 129]}}], "document": ["the", "reordering", "model", "plays", "an", "important", "role", "in", "phrase", "-", "based", "statistical", "machine", "translation", ".", "however", ",", "there", "are", "few", "works", "that", "exploit", "the", "reordering", "information", "in", "neural", "machine", "translation", ".", "in", "this", "paper", ",", "we", "propose", "a", "reordering", "mechanism", "to", "learn", "the", "reordering", "embedding", "of", "a", "word", "based", "on", "its", "contextual", "information", ".", "these", "learned", "reordering", "embeddings", "are", 
"stacked", "together", "with", "self", "-", "attention", "networks", "to", "learn", "sentence", "representation", "for", "machine", "translation", ".", "the", "reordering", "mechanism", "can", "be", "easily", "integrated", "into", "both", "the", "encoder", "and", "the", "decoder", "in", "the", "transformer", "translation", "system", ".", "experimental", "results", "on", "wmt", "\u2019", "14", "english", "-", "to", "-", "german", ",", "nist", "chinese", "-", "to", "-", "english", ",", "and", "wat", "japanese", "-", "to", "-", "english", "translation", "tasks", "demonstrate", "that", "the", "proposed", "methods", "can", "significantly", "improve", "the", "performance", "of", "the", "transformer", "."]}, {"venue": "ACL", "title": "Can a Transformer Pass the Wug Test? Tuning Copying Bias in Neural Morphological Inflection Models", "abstract": "Deep learning sequence models have been successful with morphological inflection generation. The SIGMORPHON shared task results in the past several years indicate that such models can perform well, but only if the training data covers a good amount of different lemmata, or if the lemmata to be inflected at test time have also been seen in training, as has indeed been largely the case in these tasks. Surprisingly, we find that standard models such as the Transformer almost completely fail at generalizing inflection patterns when trained on a limited number of lemmata and asked to inflect previously unseen lemmata\u2014i.e. under \u201cwug test\u201d-like circumstances. This is true even though the actual number of training examples is very large. 
While established data augmentation techniques can be employed to alleviate this shortcoming by introducing a copying bias through hallucinating synthetic new word forms using the alphabet in the language at hand, our experiment results show that, to be more effective, the hallucination process needs to pay attention to substrings of syllable-like length rather than individual characters.", "doc_id": "6efd024a29584955173f2601058610eb", "publication_year": 2022, "sentences": ["deep learning sequence models have been successful with morphological inflection generation .", "the sigmorphon shared task results in the past several years indicate that such models can perform well , but only if the training data covers a good amount of different lemmata , or if the lemmata to be inflected at test time have also been seen in training , as has indeed been largely the case in these tasks .", "surprisingly , we find that standard models such as the transformer almost completely fail at generalizing inflection patterns when trained on a limited number of lemmata and asked to inflect previously unseen lemmata \u2014 i . 
e .", "under \u201c wug test \u201d - like circumstances .", "this is true even though the actual number of training examples is very large .", "while established data augmentation techniques can be employed to alleviate this shortcoming by introducing a copying bias through hallucinating synthetic new word forms using the alphabet in the language at hand , our experiment results show that , to be more effective , the hallucination process needs to pay attention to substrings of syllable - like length rather than individual characters ."], "events": [{"event_type": "ITT", "arguments": [{"text": "morphological inflection generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["morphological", "inflection", "generation"], "offsets": [8, 9, 10]}], "trigger": {"text": "successful", "tokens": ["successful"], "offsets": [6]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [74]}, {"text": "completely fail", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["completely", "fail"], "offsets": [84, 85]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [75]}}, {"event_type": "FAC", "arguments": [{"text": "standard models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["standard", "models"], "offsets": [77, 78]}, {"text": "generalizing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generalizing"], "offsets": [87]}, {"text": "when trained on a limited number of lemmata and asked to inflect previously unseen lemmata", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "trained", "on", "a", "limited", "number", "of", "lemmata", "and", "asked", "to", "inflect", "previously", "unseen", "lemmata"], "offsets": [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104]}], "trigger": {"text": "completely fail", "tokens": ["completely", "fail"], "offsets": [84, 85]}}, {"event_type": "FIN", "arguments": 
[{"text": "pay attention", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["pay", "attention"], "offsets": [182, 183]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [169]}}, {"event_type": "FAC", "arguments": [{"text": "hallucination process", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["hallucination", "process"], "offsets": [178, 179]}, {"text": "substrings of syllable - like length", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["substrings", "of", "syllable", "-", "like", "length"], "offsets": [185, 186, 187, 188, 189, 190]}, {"text": "more effective", "nugget_type": "STR", "argument_type": "Target", "tokens": ["more", "effective"], "offsets": [174, 175]}], "trigger": {"text": "pay attention", "tokens": ["pay", "attention"], "offsets": [182, 183]}}, {"event_type": "PUR", "arguments": [{"text": "inflection patterns", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["inflection", "patterns"], "offsets": [88, 89]}], "trigger": {"text": "generalizing", "tokens": ["generalizing"], "offsets": [87]}}, {"event_type": "RWF", "arguments": [{"text": "training data", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["training", "data"], "offsets": [34, 35]}, {"text": "good amount of different lemmata", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["good", "amount", "of", "different", "lemmata"], "offsets": [38, 39, 40, 41, 42]}], "trigger": {"text": "covers", "tokens": ["covers"], "offsets": [36]}}, {"event_type": "RWF", "arguments": [{"text": "in training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "training"], "offsets": [58, 59]}, {"text": "lemmata to be inflected", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["lemmata", "to", "be", "inflected"], "offsets": [47, 48, 49, 50]}], "trigger": {"text": "seen", "tokens": ["seen"], "offsets": [57]}}, {"event_type": "RWF", "arguments": [{"text": "actual number of training examples", "nugget_type": 
"FEA", "argument_type": "Concern", "tokens": ["actual", "number", "of", "training", "examples"], "offsets": [125, 126, 127, 128, 129]}], "trigger": {"text": "very large", "tokens": ["very", "large"], "offsets": [131, 132]}}, {"event_type": "RWS", "arguments": [{"text": "established data augmentation techniques", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["established", "data", "augmentation", "techniques"], "offsets": [135, 136, 137, 138]}, {"text": "alleviate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["alleviate"], "offsets": [143]}, {"text": "copying bias", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["copying", "bias"], "offsets": [149, 150]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [147]}}, {"event_type": "PUR", "arguments": [{"text": "this shortcoming", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["this", "shortcoming"], "offsets": [144, 145]}], "trigger": {"text": "alleviate", "tokens": ["alleviate"], "offsets": [143]}}, {"event_type": "RWS", "arguments": [{"text": "alphabet", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["alphabet"], "offsets": [159]}, {"text": "synthetic new word forms", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["synthetic", "new", "word", "forms"], "offsets": [153, 154, 155, 156]}], "trigger": {"text": "hallucinating", "tokens": ["hallucinating"], "offsets": [152]}}], "document": ["deep", "learning", "sequence", "models", "have", "been", "successful", "with", "morphological", "inflection", "generation", ".", "the", "sigmorphon", "shared", "task", "results", "in", "the", "past", "several", "years", "indicate", "that", "such", "models", "can", "perform", "well", ",", "but", "only", "if", "the", "training", "data", "covers", "a", "good", "amount", "of", "different", "lemmata", ",", "or", "if", "the", "lemmata", "to", "be", "inflected", "at", "test", "time", "have", "also", "been", "seen", 
"in", "training", ",", "as", "has", "indeed", "been", "largely", "the", "case", "in", "these", "tasks", ".", "surprisingly", ",", "we", "find", "that", "standard", "models", "such", "as", "the", "transformer", "almost", "completely", "fail", "at", "generalizing", "inflection", "patterns", "when", "trained", "on", "a", "limited", "number", "of", "lemmata", "and", "asked", "to", "inflect", "previously", "unseen", "lemmata", "\u2014", "i", ".", "e", ".", "under", "\u201c", "wug", "test", "\u201d", "-", "like", "circumstances", ".", "this", "is", "true", "even", "though", "the", "actual", "number", "of", "training", "examples", "is", "very", "large", ".", "while", "established", "data", "augmentation", "techniques", "can", "be", "employed", "to", "alleviate", "this", "shortcoming", "by", "introducing", "a", "copying", "bias", "through", "hallucinating", "synthetic", "new", "word", "forms", "using", "the", "alphabet", "in", "the", "language", "at", "hand", ",", "our", "experiment", "results", "show", "that", ",", "to", "be", "more", "effective", ",", "the", "hallucination", "process", "needs", "to", "pay", "attention", "to", "substrings", "of", "syllable", "-", "like", "length", "rather", "than", "individual", "characters", "."]}, {"venue": "ACL", "title": "Contextual Fine-to-Coarse Distillation for Coarse-grained Response Selection in Open-Domain Conversations", "abstract": "We study the problem of coarse-grained response selection in retrieval-based dialogue systems. The problem is equally important with fine-grained response selection, but is less explored in existing literature. In this paper, we propose a Contextual Fine-to-Coarse (CFC) distilled model for coarse-grained response selection in open-domain conversations. 
In our CFC model, dense representations of query, candidate contexts and responses is learned based on the multi-tower architecture using contextual matching, and richer knowledge learned from the one-tower architecture (fine-grained) is distilled into the multi-tower architecture (coarse-grained) to enhance the performance of the retriever. To evaluate the performance of the proposed model, we construct two new datasets based on the Reddit comments dump and Twitter corpus. Extensive experimental results on the two datasets show that the proposed method achieves huge improvement over all evaluation metrics compared with traditional baseline methods.", "doc_id": "a5f0cfddb0aa5631d2285355f344a579", "publication_year": 2022, "sentences": ["we study the problem of coarse - grained response selection in retrieval - based dialogue systems .", "the problem is equally important with fine - grained response selection , but is less explored in existing literature .", "in this paper , we propose a contextual fine - to - coarse ( cfc ) distilled model for coarse - grained response selection in open - domain conversations .", "in our cfc model , dense representations of query , candidate contexts and responses is learned based on the multi - tower architecture using contextual matching , and richer knowledge learned from the one - tower architecture ( fine - grained ) is distilled into the multi - tower architecture ( coarse - grained ) to enhance the performance of the retriever .", "to evaluate the performance of the proposed model , we construct two new datasets based on the reddit comments dump and twitter corpus .", "extensive experimental results on the two datasets show that the proposed method achieves huge improvement over all evaluation metrics compared with traditional baseline methods ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "retrieval - based 
dialogue systems", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["retrieval", "-", "based", "dialogue", "systems"], "offsets": [11, 12, 13, 14, 15]}, {"text": "problem of coarse - grained response selection", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem", "of", "coarse", "-", "grained", "response", "selection"], "offsets": [3, 4, 5, 6, 7, 8, 9]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [1]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [41]}, {"text": "contextual fine - to - coarse ( cfc ) distilled model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["contextual", "fine", "-", "to", "-", "coarse", "distilled", "model"], "offsets": [44, 45, 46, 47, 48, 49, 53, 54]}, {"text": "coarse - grained response selection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["coarse", "-", "grained", "response", "selection"], "offsets": [56, 57, 58, 59, 60]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [42]}}, {"event_type": "MDS", "arguments": [{"text": "dense representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["dense", "representations"], "offsets": [72, 73]}, {"text": "contextual matching", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["contextual", "matching"], "offsets": [91, 92]}, {"text": "based on the multi - tower architecture", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "the", "multi", "-", "tower", "architecture"], "offsets": [83, 84, 85, 86, 87, 88, 89]}], "trigger": {"text": "learned", "tokens": ["learned"], "offsets": [82]}}, {"event_type": "MDS", "arguments": [{"text": "richer knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["richer", "knowledge"], "offsets": [95, 96]}, {"text": "multi - tower architecture", "nugget_type": "APP", "argument_type": 
"BaseComponent", "tokens": ["multi", "-", "tower", "architecture"], "offsets": [113, 114, 115, 116]}, {"text": "enhance", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhance"], "offsets": [123]}, {"text": "one - tower architecture", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["one", "-", "tower", "architecture"], "offsets": [100, 101, 102, 103]}], "trigger": {"text": "distilled", "tokens": ["distilled"], "offsets": [110]}}, {"event_type": "PUR", "arguments": [{"text": "performance of the retriever", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance", "of", "the", "retriever"], "offsets": [125, 126, 127, 128]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [123]}}, {"event_type": "PUR", "arguments": [{"text": "performance of the proposed model", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance", "of", "the", "proposed", "model"], "offsets": [133, 134, 135, 136, 137]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [131]}}, {"event_type": "CMP", "arguments": [{"text": "traditional baseline methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["traditional", "baseline", "methods"], "offsets": [175, 176, 177]}, {"text": "huge improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["huge", "improvement"], "offsets": [167, 168]}, {"text": "evaluation metrics", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["evaluation", "metrics"], "offsets": [171, 172]}, {"text": "two datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "datasets"], "offsets": [159, 160]}, {"text": "proposed method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["proposed", "method"], "offsets": [164, 165]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [166]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": 
["we"], "offsets": [139]}, {"text": "two new datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["two", "new", "datasets"], "offsets": [141, 142, 143]}, {"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluate"], "offsets": [131]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [140]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [166]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [161]}}], "document": ["we", "study", "the", "problem", "of", "coarse", "-", "grained", "response", "selection", "in", "retrieval", "-", "based", "dialogue", "systems", ".", "the", "problem", "is", "equally", "important", "with", "fine", "-", "grained", "response", "selection", ",", "but", "is", "less", "explored", "in", "existing", "literature", ".", "in", "this", "paper", ",", "we", "propose", "a", "contextual", "fine", "-", "to", "-", "coarse", "(", "cfc", ")", "distilled", "model", "for", "coarse", "-", "grained", "response", "selection", "in", "open", "-", "domain", "conversations", ".", "in", "our", "cfc", "model", ",", "dense", "representations", "of", "query", ",", "candidate", "contexts", "and", "responses", "is", "learned", "based", "on", "the", "multi", "-", "tower", "architecture", "using", "contextual", "matching", ",", "and", "richer", "knowledge", "learned", "from", "the", "one", "-", "tower", "architecture", "(", "fine", "-", "grained", ")", "is", "distilled", "into", "the", "multi", "-", "tower", "architecture", "(", "coarse", "-", "grained", ")", "to", "enhance", "the", "performance", "of", "the", "retriever", ".", "to", "evaluate", "the", "performance", "of", "the", "proposed", "model", ",", "we", "construct", "two", "new", "datasets", "based", "on", "the", "reddit", "comments", "dump", "and", "twitter", "corpus", ".", "extensive", "experimental", "results", "on", "the", "two", 
"datasets", "show", "that", "the", "proposed", "method", "achieves", "huge", "improvement", "over", "all", "evaluation", "metrics", "compared", "with", "traditional", "baseline", "methods", "."]}, {"venue": "ACL", "title": "Higher-order Derivatives of Weighted Finite-state Machines", "abstract": "Weighted finite-state machines are a fundamental building block of NLP systems. They have withstood the test of time\u2014from their early use in noisy channel models in the 1990s up to modern-day neurally parameterized conditional random fields. This work examines the computation of higher-order derivatives with respect to the normalization constant for weighted finite-state machines. We provide a general algorithm for evaluating derivatives of all orders, which has not been previously described in the literature. In the case of second-order derivatives, our scheme runs in the optimal O(A\u02c62 N\u02c64) time where A is the alphabet size and N is the number of states. Our algorithm is significantly faster than prior algorithms. 
Additionally, our approach leads to a significantly faster algorithm for computing second-order expectations, such as covariance matrices and gradients of first-order expectations.", "doc_id": "f05f30117290a6b53886ac993dbbe677", "publication_year": 2021, "sentences": ["weighted finite - state machines are a fundamental building block of nlp systems .", "they have withstood the test of time \u2014 from their early use in noisy channel models in the 1990s up to modern - day neurally parameterized conditional random fields .", "this work examines the computation of higher - order derivatives with respect to the normalization constant for weighted finite - state machines .", "we provide a general algorithm for evaluating derivatives of all orders , which has not been previously described in the literature .", "in the case of second - order derivatives , our scheme runs in the optimal o ( [UNK] [UNK] ) time where a is the alphabet size and n is the number of states .", "our algorithm is significantly faster than prior algorithms .", "additionally , our approach leads to a significantly faster algorithm for computing second - order expectations , such as covariance matrices and gradients of first - order expectations ."], "events": [{"event_type": "ITT", "arguments": [{"text": "weighted finite - state machines", "nugget_type": "APP", "argument_type": "Target", "tokens": ["weighted", "finite", "-", "state", "machines"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "block", "tokens": ["block"], "offsets": [9]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [67]}, {"text": "general algorithm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["general", "algorithm"], "offsets": [70, 71]}, {"text": "evaluating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluating"], "offsets": [73]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": 
[68]}}, {"event_type": "PUR", "arguments": [{"text": "derivatives of all orders", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["derivatives", "of", "all", "orders"], "offsets": [74, 75, 76, 77]}], "trigger": {"text": "evaluating", "tokens": ["evaluating"], "offsets": [73]}}, {"event_type": "CMP", "arguments": [{"text": "significantly faster", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "faster"], "offsets": [127, 128]}, {"text": "prior algorithms", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["prior", "algorithms"], "offsets": [130, 131]}], "trigger": {"text": "significantly faster", "tokens": ["significantly", "faster"], "offsets": [127, 128]}}, {"event_type": "FAC", "arguments": [{"text": "computing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["computing"], "offsets": [144]}, {"text": "general algorithm", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["general", "algorithm"], "offsets": [70, 71]}, {"text": "significantly faster algorithm", "nugget_type": "STR", "argument_type": "Object", "tokens": ["significantly", "faster", "algorithm"], "offsets": [140, 141, 142]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [137]}}, {"event_type": "PUR", "arguments": [{"text": "second - order expectations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["second", "-", "order", "expectations"], "offsets": [145, 146, 147, 148]}], "trigger": {"text": "computing", "tokens": ["computing"], "offsets": [144]}}, {"event_type": "WKS", "arguments": [{"text": "computation of higher - order derivatives", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["computation", "of", "higher", "-", "order", "derivatives"], "offsets": [48, 49, 50, 51, 52, 53]}, {"text": "with respect to the normalization constant", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "respect", "to", "the", "normalization", "constant"], "offsets": [54, 55, 56, 57, 58, 
59]}, {"text": "weighted finite - state machines", "nugget_type": "APP", "argument_type": "Target", "tokens": ["weighted", "finite", "-", "state", "machines"], "offsets": [61, 62, 63, 64, 65]}], "trigger": {"text": "examines", "tokens": ["examines"], "offsets": [46]}}], "document": ["weighted", "finite", "-", "state", "machines", "are", "a", "fundamental", "building", "block", "of", "nlp", "systems", ".", "they", "have", "withstood", "the", "test", "of", "time", "\u2014", "from", "their", "early", "use", "in", "noisy", "channel", "models", "in", "the", "1990s", "up", "to", "modern", "-", "day", "neurally", "parameterized", "conditional", "random", "fields", ".", "this", "work", "examines", "the", "computation", "of", "higher", "-", "order", "derivatives", "with", "respect", "to", "the", "normalization", "constant", "for", "weighted", "finite", "-", "state", "machines", ".", "we", "provide", "a", "general", "algorithm", "for", "evaluating", "derivatives", "of", "all", "orders", ",", "which", "has", "not", "been", "previously", "described", "in", "the", "literature", ".", "in", "the", "case", "of", "second", "-", "order", "derivatives", ",", "our", "scheme", "runs", "in", "the", "optimal", "o", "(", "[UNK]", "[UNK]", ")", "time", "where", "a", "is", "the", "alphabet", "size", "and", "n", "is", "the", "number", "of", "states", ".", "our", "algorithm", "is", "significantly", "faster", "than", "prior", "algorithms", ".", "additionally", ",", "our", "approach", "leads", "to", "a", "significantly", "faster", "algorithm", "for", "computing", "second", "-", "order", "expectations", ",", "such", "as", "covariance", "matrices", "and", "gradients", "of", "first", "-", "order", "expectations", "."]}, {"venue": "ACL", "title": "Risk Minimization for Zero-shot Sequence Labeling", "abstract": "Zero-shot sequence labeling aims to build a sequence labeler without human-annotated datasets. 
One straightforward approach is utilizing existing systems (source models) to generate pseudo-labeled datasets and train a target sequence labeler accordingly. However, due to the gap between the source and the target languages/domains, this approach may fail to recover the true labels. In this paper, we propose a novel unified framework for zero-shot sequence labeling with minimum risk training and design a new decomposable risk function that models the relations between the predicted labels from the source models and the true labels. By making the risk function trainable, we draw a connection between minimum risk training and latent variable model learning. We propose a unified learning algorithm based on the expectation maximization (EM) algorithm. We extensively evaluate our proposed approaches on cross-lingual/domain sequence labeling tasks over twenty-one datasets. The results show that our approaches outperform state-of-the-art baseline systems.", "doc_id": "9c5251322596cb8d7ff851091dba2006", "publication_year": 2021, "sentences": ["zero - shot sequence labeling aims to build a sequence labeler without human - annotated datasets .", "one straightforward approach is utilizing existing systems ( source models ) to generate pseudo - labeled datasets and train a target sequence labeler accordingly .", "however , due to the gap between the source and the target languages / domains , this approach may fail to recover the true labels .", "in this paper , we propose a novel unified framework for zero - shot sequence labeling with minimum risk training and design a new decomposable risk function that models the relations between the predicted labels from the source models and the true labels .", "by making the risk function trainable , we draw a connection between minimum risk training and latent variable model learning .", "we propose a unified learning algorithm based on the expectation maximization ( em ) algorithm .", "we extensively evaluate our proposed 
approaches on cross - lingual / domain sequence labeling tasks over twenty - one datasets .", "the results show that our approaches outperform state - of - the - art baseline systems ."], "events": [{"event_type": "ITT", "arguments": [{"text": "zero - shot sequence labeling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["zero", "-", "shot", "sequence", "labeling"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "aims", "tokens": ["aims"], "offsets": [5]}}, {"event_type": "PUR", "arguments": [{"text": "pseudo - labeled datasets", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["pseudo", "-", "labeled", "datasets"], "offsets": [30, 31, 32, 33]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [29]}}, {"event_type": "RWS", "arguments": [{"text": "train", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["train"], "offsets": [35]}, {"text": "one straightforward approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["one", "straightforward", "approach"], "offsets": [17, 18, 19]}, {"text": "existing systems", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["existing", "systems"], "offsets": [22, 23]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [29]}], "trigger": {"text": "utilizing", "tokens": ["utilizing"], "offsets": [21]}}, {"event_type": "PUR", "arguments": [{"text": "target sequence labeler", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["target", "sequence", "labeler"], "offsets": [37, 38, 39]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [35]}}, {"event_type": "RWF", "arguments": [{"text": "straightforward approach", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["straightforward", "approach"], "offsets": [18, 19]}, {"text": "recover", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["recover"], "offsets": [63]}, {"text": "gap between the source and the 
target languages / domains", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["gap", "between", "the", "source", "and", "the", "target", "languages", "/", "domains"], "offsets": [47, 48, 49, 50, 51, 52, 53, 54, 55, 56]}], "trigger": {"text": "fail", "tokens": ["fail"], "offsets": [61]}}, {"event_type": "PUR", "arguments": [{"text": "true labels", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["true", "labels"], "offsets": [65, 66]}], "trigger": {"text": "recover", "tokens": ["recover"], "offsets": [63]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [72]}, {"text": "unified framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unified", "framework"], "offsets": [76, 77]}, {"text": "zero - shot sequence labeling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["zero", "-", "shot", "sequence", "labeling"], "offsets": [79, 80, 81, 82, 83]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [73]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [72]}, {"text": "decomposable risk function", "nugget_type": "APP", "argument_type": "Content", "tokens": ["decomposable", "risk", "function"], "offsets": [92, 93, 94]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [89]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [133]}, {"text": "unified learning algorithm based on the expectation maximization", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unified", "learning", "algorithm", "based", "on", "the", "expectation", "maximization"], "offsets": [136, 137, 138, 139, 140, 141, 142, 143]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [134]}}, {"event_type": "MDS", "arguments": [{"text": "source models", 
"nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["source", "models"], "offsets": [105, 106]}, {"text": "true labels", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["true", "labels"], "offsets": [109, 110]}, {"text": "relations between the predicted labels", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relations", "between", "the", "predicted", "labels"], "offsets": [98, 99, 100, 101, 102]}], "trigger": {"text": "models", "tokens": ["models"], "offsets": [96]}}, {"event_type": "MDS", "arguments": [{"text": "risk function", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["risk", "function"], "offsets": [115, 116]}, {"text": "trainable", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["trainable"], "offsets": [117]}], "trigger": {"text": "making", "tokens": ["making"], "offsets": [113]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [149]}, {"text": "unified learning algorithm based on the expectation maximization", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unified", "learning", "algorithm", "based", "on", "the", "expectation", "maximization"], "offsets": [136, 137, 138, 139, 140, 141, 142, 143]}, {"text": "twenty - one datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["twenty", "-", "one", "datasets"], "offsets": [165, 166, 167, 168]}, {"text": "on cross - lingual / domain sequence labeling tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "cross", "-", "lingual", "/", "domain", "sequence", "labeling", "tasks"], "offsets": [155, 156, 157, 158, 159, 160, 161, 162, 163]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [151]}}, {"event_type": "FIN", "arguments": [{"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], "offsets": [176]}], "trigger": {"text": 
"show", "tokens": ["show"], "offsets": [172]}}, {"event_type": "CMP", "arguments": [{"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [176]}, {"text": "state - of - the - art baseline systems", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "baseline", "systems"], "offsets": [177, 178, 179, 180, 181, 182, 183, 184, 185]}, {"text": "unified learning algorithm", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unified", "learning", "algorithm"], "offsets": [136, 137, 138]}, {"text": "unified framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unified", "framework"], "offsets": [76, 77]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [176]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [119]}, {"text": "connection between minimum risk training and latent variable model learning", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["connection", "between", "minimum", "risk", "training", "and", "latent", "variable", "model", "learning"], "offsets": [122, 123, 124, 125, 126, 127, 128, 129, 130, 131]}], "trigger": {"text": "draw", "tokens": ["draw"], "offsets": [120]}}], "document": ["zero", "-", "shot", "sequence", "labeling", "aims", "to", "build", "a", "sequence", "labeler", "without", "human", "-", "annotated", "datasets", ".", "one", "straightforward", "approach", "is", "utilizing", "existing", "systems", "(", "source", "models", ")", "to", "generate", "pseudo", "-", "labeled", "datasets", "and", "train", "a", "target", "sequence", "labeler", "accordingly", ".", "however", ",", "due", "to", "the", "gap", "between", "the", "source", "and", "the", "target", "languages", "/", "domains", ",", "this", "approach", "may", "fail", "to", "recover", "the", "true", "labels", ".", "in", "this", "paper", ",", "we", 
"propose", "a", "novel", "unified", "framework", "for", "zero", "-", "shot", "sequence", "labeling", "with", "minimum", "risk", "training", "and", "design", "a", "new", "decomposable", "risk", "function", "that", "models", "the", "relations", "between", "the", "predicted", "labels", "from", "the", "source", "models", "and", "the", "true", "labels", ".", "by", "making", "the", "risk", "function", "trainable", ",", "we", "draw", "a", "connection", "between", "minimum", "risk", "training", "and", "latent", "variable", "model", "learning", ".", "we", "propose", "a", "unified", "learning", "algorithm", "based", "on", "the", "expectation", "maximization", "(", "em", ")", "algorithm", ".", "we", "extensively", "evaluate", "our", "proposed", "approaches", "on", "cross", "-", "lingual", "/", "domain", "sequence", "labeling", "tasks", "over", "twenty", "-", "one", "datasets", ".", "the", "results", "show", "that", "our", "approaches", "outperform", "state", "-", "of", "-", "the", "-", "art", "baseline", "systems", "."]}, {"venue": "ACL", "title": "Unified Semantic Parsing with Weak Supervision", "abstract": "Semantic parsing over multiple knowledge bases enables a parser to exploit structural similarities of programs across the multiple domains. However, the fundamental challenge lies in obtaining high-quality annotations of (utterance, program) pairs across various domains needed for training such models. To overcome this, we propose a novel framework to build a unified multi-domain enabled semantic parser trained only with weak supervision (denotations). Weakly supervised training is particularly arduous as the program search space grows exponentially in a multi-domain setting. 
To solve this, we incorporate a multi-policy distillation mechanism in which we first train domain-specific semantic parsers (teachers) using weak supervision in the absence of the ground truth programs, followed by training a single unified parser (student) from the domain specific policies obtained from these teachers. The resultant semantic parser is not only compact but also generalizes better, and generates more accurate programs. It further does not require the user to provide a domain label while querying. On the standard Overnight dataset (containing multiple domains), we demonstrate that the proposed model improves performance by 20% in terms of denotation accuracy in comparison to baseline techniques.", "doc_id": "5a5a1c82f81ff2cdca471ed3f41f4024", "publication_year": 2019, "sentences": ["semantic parsing over multiple knowledge bases enables a parser to exploit structural similarities of programs across the multiple domains .", "however , the fundamental challenge lies in obtaining high - quality annotations of ( utterance , program ) pairs across various domains needed for training such models .", "to overcome this , we propose a novel framework to build a unified multi - domain enabled semantic parser trained only with weak supervision ( denotations ) .", "weakly supervised training is particularly arduous as the program search space grows exponentially in a multi - domain setting .", "to solve this , we incorporate a multi - policy distillation mechanism in which we first train domain - specific semantic parsers ( teachers ) using weak supervision in the absence of the ground truth programs , followed by training a single unified parser ( student ) from the domain specific policies obtained from these teachers .", "the resultant semantic parser is not only compact but also generalizes better , and generates more accurate programs .", "it further does not require the user to provide a domain label while querying .", "on the standard overnight dataset ( 
containing multiple domains ) , we demonstrate that the proposed model improves performance by 20 % in terms of denotation accuracy in comparison to baseline techniques ."], "events": [{"event_type": "ITT", "arguments": [{"text": "semantic parsing over multiple knowledge bases", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["semantic", "parsing", "over", "multiple", "knowledge", "bases"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "enables", "tokens": ["enables"], "offsets": [6]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [52]}, {"text": "novel framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["novel", "framework"], "offsets": [55, 56]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [53]}}, {"event_type": "MDS", "arguments": [{"text": "unified multi - domain enabled semantic parser", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["unified", "multi", "-", "domain", "enabled", "semantic", "parser"], "offsets": [60, 61, 62, 63, 64, 65, 66]}, {"text": "weak supervision", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["weak", "supervision"], "offsets": [70, 71]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [58]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [100]}, {"text": "multi - policy distillation mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "policy", "distillation", "mechanism"], "offsets": [103, 104, 105, 106, 107]}], "trigger": {"text": "incorporate", "tokens": ["incorporate"], "offsets": [101]}}, {"event_type": "MDS", "arguments": [{"text": "weak supervision", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["weak", "supervision"], "offsets": [122, 123]}, {"text": "domain - specific semantic parsers", 
"nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["domain", "-", "specific", "semantic", "parsers"], "offsets": [113, 114, 115, 116, 117]}, {"text": "in the absence of the ground truth programs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "absence", "of", "the", "ground", "truth", "programs"], "offsets": [124, 125, 126, 127, 128, 129, 130, 131]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [112]}}, {"event_type": "MDS", "arguments": [{"text": "domain - specific semantic parsers", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["domain", "-", "specific", "semantic", "parsers"], "offsets": [113, 114, 115, 116, 117]}, {"text": "domain specific policies", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["domain", "specific", "policies"], "offsets": [145, 146, 147]}, {"text": "single unified parser", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["single", "unified", "parser"], "offsets": [137, 138, 139]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [135]}}, {"event_type": "FAC", "arguments": [{"text": "resultant semantic parser", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["resultant", "semantic", "parser"], "offsets": [154, 155, 156]}, {"text": "more accurate programs", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["more", "accurate", "programs"], "offsets": [168, 169, 170]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [167]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [198]}, {"text": "improves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improves"], "offsets": [204]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [199]}}, {"event_type": "CMP", "arguments": [{"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": 
["improves"], "offsets": [204]}, {"text": "20 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["20", "%"], "offsets": [207, 208]}, {"text": "denotation accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["denotation", "accuracy"], "offsets": [212, 213]}, {"text": "standard overnight dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["standard", "overnight", "dataset"], "offsets": [189, 190, 191]}, {"text": "baseline techniques", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baseline", "techniques"], "offsets": [217, 218]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [204]}}], "document": ["semantic", "parsing", "over", "multiple", "knowledge", "bases", "enables", "a", "parser", "to", "exploit", "structural", "similarities", "of", "programs", "across", "the", "multiple", "domains", ".", "however", ",", "the", "fundamental", "challenge", "lies", "in", "obtaining", "high", "-", "quality", "annotations", "of", "(", "utterance", ",", "program", ")", "pairs", "across", "various", "domains", "needed", "for", "training", "such", "models", ".", "to", "overcome", "this", ",", "we", "propose", "a", "novel", "framework", "to", "build", "a", "unified", "multi", "-", "domain", "enabled", "semantic", "parser", "trained", "only", "with", "weak", "supervision", "(", "denotations", ")", ".", "weakly", "supervised", "training", "is", "particularly", "arduous", "as", "the", "program", "search", "space", "grows", "exponentially", "in", "a", "multi", "-", "domain", "setting", ".", "to", "solve", "this", ",", "we", "incorporate", "a", "multi", "-", "policy", "distillation", "mechanism", "in", "which", "we", "first", "train", "domain", "-", "specific", "semantic", "parsers", "(", "teachers", ")", "using", "weak", "supervision", "in", "the", "absence", "of", "the", "ground", "truth", "programs", ",", "followed", "by", "training", "a", "single", "unified", "parser", "(", "student", ")", "from", 
"the", "domain", "specific", "policies", "obtained", "from", "these", "teachers", ".", "the", "resultant", "semantic", "parser", "is", "not", "only", "compact", "but", "also", "generalizes", "better", ",", "and", "generates", "more", "accurate", "programs", ".", "it", "further", "does", "not", "require", "the", "user", "to", "provide", "a", "domain", "label", "while", "querying", ".", "on", "the", "standard", "overnight", "dataset", "(", "containing", "multiple", "domains", ")", ",", "we", "demonstrate", "that", "the", "proposed", "model", "improves", "performance", "by", "20", "%", "in", "terms", "of", "denotation", "accuracy", "in", "comparison", "to", "baseline", "techniques", "."]}, {"venue": "ACL", "title": "Exact Hard Monotonic Attention for Character-Level Transduction", "abstract": "Many common character-level, string-to-string transduction tasks, e.g., grapheme-to-phoneme conversion and morphological inflection, consist almost exclusively of monotonic transduction. Neural sequence-to-sequence models with soft attention, non-monotonic models, outperform popular monotonic models. In this work, we ask the following question: Is monotonicity really a helpful inductive bias in these tasks? We develop a hard attention sequence-to-sequence model that enforces strict monotonicity and learns alignment jointly. With the help of dynamic programming, we are able to compute the exact marginalization over all alignments. Our models achieve state-of-the-art performance on morphological inflection. Furthermore, we find strong performance on two other character-level transduction tasks. Code is available at https://github.com/shijie-wu/neural-transducer.", "doc_id": "f57ad00778616d4dbcb03dce6c853fc8", "publication_year": 2019, "sentences": ["many common character - level , string - to - string transduction tasks , e . g . 
, grapheme - to - phoneme conversion and morphological inflection , consist almost exclusively of monotonic transduction .", "neural sequence - to - sequence models with soft attention , non - monotonic models , outperform popular monotonic models .", "in this work , we ask the following question : is monotonicity really a helpful inductive bias in these tasks ?", "we develop a hard attention sequence - to - sequence model that enforces strict monotonicity and learns alignment jointly .", "with the help of dynamic programming , we are able to compute the exact marginalization over all alignments .", "our models achieve state - of - the - art performance on morphological inflection .", "furthermore , we find strong performance on two other character - level transduction tasks .", "code is available at https : / / github . com / shijie - wu / neural - transducer ."], "events": [{"event_type": "ITT", "arguments": [{"text": "monotonic transduction", "nugget_type": "APP", "argument_type": "Target", "tokens": ["monotonic", "transduction"], "offsets": [33, 34]}], "trigger": {"text": "consist", "tokens": ["consist"], "offsets": [29]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [78]}, {"text": "hard attention sequence - to - sequence model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hard", "attention", "sequence", "-", "to", "-", "sequence", "model"], "offsets": [81, 82, 83, 84, 85, 86, 87, 88]}, {"text": "enforces", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enforces"], "offsets": [90]}, {"text": "learns", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learns"], "offsets": [94]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [79]}}, {"event_type": "PUR", "arguments": [{"text": "strict monotonicity", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["strict", "monotonicity"], "offsets": [91, 92]}], 
"trigger": {"text": "enforces", "tokens": ["enforces"], "offsets": [90]}}, {"event_type": "PUR", "arguments": [{"text": "alignment", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["alignment"], "offsets": [95]}, {"text": "jointly", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["jointly"], "offsets": [96]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [94]}}, {"event_type": "MDS", "arguments": [{"text": "exact marginalization", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["exact", "marginalization"], "offsets": [111, 112]}, {"text": "dynamic programming", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["dynamic", "programming"], "offsets": [102, 103]}], "trigger": {"text": "compute", "tokens": ["compute"], "offsets": [109]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [120, 121, 122, 123, 124, 125, 126, 127]}, {"text": "morphological inflection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["morphological", "inflection"], "offsets": [129, 130]}, {"text": "models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["models"], "offsets": [118]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [119]}}, {"event_type": "FAC", "arguments": [{"text": "strong performance", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["strong", "performance"], "offsets": [136, 137]}, {"text": "character - level transduction tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["character", "-", "level", "transduction", "tasks"], "offsets": [141, 142, 143, 144, 145]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [135]}}], "document": ["many", "common", "character", "-", "level", ",", "string", "-", "to", "-", "string", "transduction", "tasks", ",", 
"e", ".", "g", ".", ",", "grapheme", "-", "to", "-", "phoneme", "conversion", "and", "morphological", "inflection", ",", "consist", "almost", "exclusively", "of", "monotonic", "transduction", ".", "neural", "sequence", "-", "to", "-", "sequence", "models", "with", "soft", "attention", ",", "non", "-", "monotonic", "models", ",", "outperform", "popular", "monotonic", "models", ".", "in", "this", "work", ",", "we", "ask", "the", "following", "question", ":", "is", "monotonicity", "really", "a", "helpful", "inductive", "bias", "in", "these", "tasks", "?", "we", "develop", "a", "hard", "attention", "sequence", "-", "to", "-", "sequence", "model", "that", "enforces", "strict", "monotonicity", "and", "learns", "alignment", "jointly", ".", "with", "the", "help", "of", "dynamic", "programming", ",", "we", "are", "able", "to", "compute", "the", "exact", "marginalization", "over", "all", "alignments", ".", "our", "models", "achieve", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "morphological", "inflection", ".", "furthermore", ",", "we", "find", "strong", "performance", "on", "two", "other", "character", "-", "level", "transduction", "tasks", ".", "code", "is", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "shijie", "-", "wu", "/", "neural", "-", "transducer", "."]}, {"venue": "ACL", "title": "AlephBERT: Language Model Pre-training and Evaluation from Sub-Word to Sentence Level", "abstract": "Large Pre-trained Language Models (PLMs) have become ubiquitous in the development of language understanding technology and lie at the heart of many artificial intelligence advances. While advances reported for English using PLMs are unprecedented, reported advances using PLMs for Hebrew are few and far between. The problem is twofold. First, so far, Hebrew resources for training large language models are not of the same magnitude as their English counterparts. 
Second, most benchmarks available to evaluate progress in Hebrew NLP require morphological boundaries which are not available in the output of standard PLMs. In this work we remedy both aspects. We present AlephBERT, a large PLM for Modern Hebrew, trained on larger vocabulary and a larger dataset than any Hebrew PLM before. Moreover, we introduce a novel neural architecture that recovers the morphological segments encoded in contextualized embedding vectors. Based on this new morphological component we offer an evaluation suite consisting of multiple tasks and benchmarks that cover sentence-level, word-level and sub-word level analyses. On all tasks, AlephBERT obtains state-of-the-art results beyond contemporary Hebrew baselines. We make our AlephBERT model, the morphological extraction model, and the Hebrew evaluation suite publicly available, for evaluating future Hebrew PLMs.", "doc_id": "b895ce9cd27c63f5f84d5d745e144467", "publication_year": 2022, "sentences": ["large pre - trained language models ( plms ) have become ubiquitous in the development of language understanding technology and lie at the heart of many artificial intelligence advances .", "while advances reported for english using plms are unprecedented , reported advances using plms for hebrew are few and far between .", "the problem is twofold .", "first , so far , hebrew resources for training large language models are not of the same magnitude as their english counterparts .", "second , most benchmarks available to evaluate progress in hebrew nlp require morphological boundaries which are not available in the output of standard plms .", "in this work we remedy both aspects .", "we present alephbert , a large plm for modern hebrew , trained on larger vocabulary and a larger dataset than any hebrew plm before .", "moreover , we introduce a novel neural architecture that recovers the morphological segments encoded in contextualized embedding vectors .", "based on this new morphological component we 
offer an evaluation suite consisting of multiple tasks and benchmarks that cover sentence - level , word - level and sub - word level analyses .", "on all tasks , alephbert obtains state - of - the - art results beyond contemporary hebrew baselines .", "we make our alephbert model , the morphological extraction model , and the hebrew evaluation suite publicly available , for evaluating future hebrew plms ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["plms"], "offsets": [43]}], "trigger": {"text": "become", "tokens": ["become"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "reported", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["reported"], "offsets": [40]}, {"text": "plms for hebrew", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["plms", "for", "hebrew"], "offsets": [43, 44, 45]}], "trigger": {"text": "few and far", "tokens": ["few", "and", "far"], "offsets": [47, 48, 49]}}, {"event_type": "RWF", "arguments": [{"text": "output of standard plms", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["output", "of", "standard", "pre", "-", "trained", "language", "models"], "offsets": [100, 101, 102, 1, 2, 3, 4, 5]}, {"text": "morphological boundaries", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["morphological", "boundaries"], "offsets": [92, 93]}], "trigger": {"text": "not available", "tokens": ["not", "available"], "offsets": [96, 97]}}, {"event_type": "RWF", "arguments": [{"text": "hebrew resources", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["hebrew", "resources"], "offsets": [62, 63]}, {"text": "training", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["training"], "offsets": [65]}], "trigger": {"text": "not of the same magnitude", "tokens": ["not", "of", "the", "same", "magnitude"], "offsets": [70, 71, 72, 73, 74]}}, {"event_type": "PUR", "arguments": 
[{"text": "large language models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["large", "language", "models"], "offsets": [66, 67, 68]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [65]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [108]}, {"text": "both aspects", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["both", "aspects"], "offsets": [110, 111]}], "trigger": {"text": "remedy", "tokens": ["remedy"], "offsets": [109]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [113]}, {"text": "alephbert", "nugget_type": "APP", "argument_type": "Content", "tokens": ["alephbert"], "offsets": [115]}, {"text": "trained", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["trained"], "offsets": [124]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [114]}}, {"event_type": "PUR", "arguments": [{"text": "larger vocabulary", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["larger", "vocabulary"], "offsets": [126, 127]}, {"text": "larger dataset", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["larger", "dataset"], "offsets": [130, 131]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [124]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [140]}, {"text": "neural architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "architecture"], "offsets": [144, 145]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [141]}}, {"event_type": "MDS", "arguments": [{"text": "morphological segments", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["morphological", "segments"], "offsets": [149, 150]}, {"text": "contextualized embedding vectors", 
"nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["contextualized", "embedding", "vectors"], "offsets": [153, 154, 155]}], "trigger": {"text": "encoded", "tokens": ["encoded"], "offsets": [151]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [163]}, {"text": "evaluation suite", "nugget_type": "APP", "argument_type": "Content", "tokens": ["evaluation", "suite"], "offsets": [166, 167]}, {"text": "based on this new morphological component", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "this", "new", "morphological", "component"], "offsets": [157, 158, 159, 160, 161, 162]}], "trigger": {"text": "offer", "tokens": ["offer"], "offsets": [164]}}, {"event_type": "CMP", "arguments": [{"text": "alephbert", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["alephbert"], "offsets": [194]}, {"text": "contemporary hebrew baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["contemporary", "hebrew", "baselines"], "offsets": [205, 206, 207]}, {"text": "state - of - the - art results", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [196, 197, 198, 199, 200, 201, 202, 203]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [195]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [209]}, {"text": "alephbert model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["alephbert", "model"], "offsets": [212, 213]}, {"text": "morphological extraction model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["morphological", "extraction", "model"], "offsets": [216, 217, 218]}, {"text": "hebrew evaluation suite", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hebrew", "evaluation", "suite"], "offsets": [222, 223, 
224]}, {"text": "evaluating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluating"], "offsets": [229]}], "trigger": {"text": "make", "tokens": ["make"], "offsets": [210]}}, {"event_type": "PUR", "arguments": [{"text": "future hebrew plms", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["future", "hebrew", "pre", "-", "trained", "language", "models"], "offsets": [230, 231, 1, 2, 3, 4, 5]}], "trigger": {"text": "evaluating", "tokens": ["evaluating"], "offsets": [229]}}], "document": ["large", "pre", "-", "trained", "language", "models", "(", "plms", ")", "have", "become", "ubiquitous", "in", "the", "development", "of", "language", "understanding", "technology", "and", "lie", "at", "the", "heart", "of", "many", "artificial", "intelligence", "advances", ".", "while", "advances", "reported", "for", "english", "using", "plms", "are", "unprecedented", ",", "reported", "advances", "using", "plms", "for", "hebrew", "are", "few", "and", "far", "between", ".", "the", "problem", "is", "twofold", ".", "first", ",", "so", "far", ",", "hebrew", "resources", "for", "training", "large", "language", "models", "are", "not", "of", "the", "same", "magnitude", "as", "their", "english", "counterparts", ".", "second", ",", "most", "benchmarks", "available", "to", "evaluate", "progress", "in", "hebrew", "nlp", "require", "morphological", "boundaries", "which", "are", "not", "available", "in", "the", "output", "of", "standard", "plms", ".", "in", "this", "work", "we", "remedy", "both", "aspects", ".", "we", "present", "alephbert", ",", "a", "large", "plm", "for", "modern", "hebrew", ",", "trained", "on", "larger", "vocabulary", "and", "a", "larger", "dataset", "than", "any", "hebrew", "plm", "before", ".", "moreover", ",", "we", "introduce", "a", "novel", "neural", "architecture", "that", "recovers", "the", "morphological", "segments", "encoded", "in", "contextualized", "embedding", "vectors", ".", "based", "on", "this", "new", "morphological", "component", 
"we", "offer", "an", "evaluation", "suite", "consisting", "of", "multiple", "tasks", "and", "benchmarks", "that", "cover", "sentence", "-", "level", ",", "word", "-", "level", "and", "sub", "-", "word", "level", "analyses", ".", "on", "all", "tasks", ",", "alephbert", "obtains", "state", "-", "of", "-", "the", "-", "art", "results", "beyond", "contemporary", "hebrew", "baselines", ".", "we", "make", "our", "alephbert", "model", ",", "the", "morphological", "extraction", "model", ",", "and", "the", "hebrew", "evaluation", "suite", "publicly", "available", ",", "for", "evaluating", "future", "hebrew", "plms", "."]}, {"venue": "ACL", "title": "Generating Landmark Navigation Instructions from Maps as a Graph-to-Text Problem", "abstract": "Car-focused navigation services are based on turns and distances of named streets, whereas navigation instructions naturally used by humans are centered around physical objects called landmarks. We present a neural model that takes OpenStreetMap representations as input and learns to generate navigation instructions that contain visible and salient landmarks from human natural language instructions. Routes on the map are encoded in a location- and rotation-invariant graph representation that is decoded into natural language instructions. Our work is based on a novel dataset of 7,672 crowd-sourced instances that have been verified by human navigation in Street View. 
Our evaluation shows that the navigation instructions generated by our system have similar properties as human-generated instructions, and lead to successful human navigation in Street View.", "doc_id": "eeae43e4c7b0eb71b4e8fc9f00affe61", "publication_year": 2021, "sentences": ["car - focused navigation services are based on turns and distances of named streets , whereas navigation instructions naturally used by humans are centered around physical objects called landmarks .", "we present a neural model that takes openstreetmap representations as input and learns to generate navigation instructions that contain visible and salient landmarks from human natural language instructions .", "routes on the map are encoded in a location - and rotation - invariant graph representation that is decoded into natural language instructions .", "our work is based on a novel dataset of 7 , 672 crowd - sourced instances that have been verified by human navigation in street view .", "our evaluation shows that the navigation instructions generated by our system have similar properties as human - generated instructions , and lead to successful human navigation in street view ."], "events": [{"event_type": "ITT", "arguments": [{"text": "car - focused navigation services", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["car", "-", "focused", "navigation", "services"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "based", "tokens": ["based"], "offsets": [6]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [30]}, {"text": "neural model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "model"], "offsets": [33, 34]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [31]}}, {"event_type": "MDS", "arguments": [{"text": "input", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["input"], "offsets": [40]}, {"text": "openstreetmap 
representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["openstreetmap", "representations"], "offsets": [37, 38]}], "trigger": {"text": "takes", "tokens": ["takes"], "offsets": [36]}}, {"event_type": "MDS", "arguments": [{"text": "navigation instructions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["navigation", "instructions"], "offsets": [45, 46]}, {"text": "human natural language instructions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["human", "natural", "language", "instructions"], "offsets": [54, 55, 56, 57]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [44]}}, {"event_type": "MDS", "arguments": [{"text": "routes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["routes"], "offsets": [59]}, {"text": "location - and rotation - invariant graph representation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["location", "-", "and", "rotation", "-", "invariant", "graph", "representation"], "offsets": [67, 68, 69, 70, 71, 72, 73, 74]}], "trigger": {"text": "encoded", "tokens": ["encoded"], "offsets": [64]}}, {"event_type": "MDS", "arguments": [{"text": "location - and rotation - invariant graph representation", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["location", "-", "and", "rotation", "-", "invariant", "graph", "representation"], "offsets": [67, 68, 69, 70, 71, 72, 73, 74]}, {"text": "natural language instructions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["natural", "language", "instructions"], "offsets": [79, 80, 81]}], "trigger": {"text": "decoded", "tokens": ["decoded"], "offsets": [77]}}, {"event_type": "FIN", "arguments": [{"text": "similar", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["similar"], "offsets": [122]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [112]}}, {"event_type": "CMP", "arguments": [{"text": "navigation 
instructions", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["navigation", "instructions"], "offsets": [115, 116]}, {"text": "generated by our system", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["generated", "by", "neural", "model"], "offsets": [117, 118, 33, 34]}, {"text": "similar", "nugget_type": "STR", "argument_type": "Result", "tokens": ["similar"], "offsets": [122]}, {"text": "properties", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["properties"], "offsets": [123]}], "trigger": {"text": "similar", "tokens": ["similar"], "offsets": [122]}}, {"event_type": "FIN", "arguments": [{"text": "lead", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["lead"], "offsets": [131]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [112]}}, {"event_type": "FAC", "arguments": [{"text": "successful human navigation", "nugget_type": "STR", "argument_type": "Object", "tokens": ["successful", "human", "navigation"], "offsets": [133, 134, 135]}, {"text": "in street view", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "street", "view"], "offsets": [136, 137, 138]}, {"text": "navigation instructions", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["navigation", "instructions"], "offsets": [115, 116]}, {"text": "generated by our system", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["generated", "by", "neural", "model"], "offsets": [117, 118, 33, 34]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [131]}}], "document": ["car", "-", "focused", "navigation", "services", "are", "based", "on", "turns", "and", "distances", "of", "named", "streets", ",", "whereas", "navigation", "instructions", "naturally", "used", "by", "humans", "are", "centered", "around", "physical", "objects", "called", "landmarks", ".", "we", "present", "a", "neural", "model", "that", "takes", "openstreetmap", "representations", "as", "input", "and", "learns", "to", 
"generate", "navigation", "instructions", "that", "contain", "visible", "and", "salient", "landmarks", "from", "human", "natural", "language", "instructions", ".", "routes", "on", "the", "map", "are", "encoded", "in", "a", "location", "-", "and", "rotation", "-", "invariant", "graph", "representation", "that", "is", "decoded", "into", "natural", "language", "instructions", ".", "our", "work", "is", "based", "on", "a", "novel", "dataset", "of", "7", ",", "672", "crowd", "-", "sourced", "instances", "that", "have", "been", "verified", "by", "human", "navigation", "in", "street", "view", ".", "our", "evaluation", "shows", "that", "the", "navigation", "instructions", "generated", "by", "our", "system", "have", "similar", "properties", "as", "human", "-", "generated", "instructions", ",", "and", "lead", "to", "successful", "human", "navigation", "in", "street", "view", "."]}, {"venue": "ACL", "title": "Requirements and Motivations of Low-Resource Speech Synthesis for Language Revitalization", "abstract": "This paper describes the motivation and development of speech synthesis systems for the purposes of language revitalization. By building speech synthesis systems for three Indigenous languages spoken in Canada, Kanien\u2019k\u00e9ha, Gitksan & SEN\u0106O\u0166EN, we re-evaluate the question of how much data is required to build low-resource speech synthesis systems featuring state-of-the-art neural models. For example, preliminary results with English data show that a FastSpeech2 model trained with 1 hour of training data can produce speech with comparable naturalness to a Tacotron2 model trained with 10 hours of data. 
Finally, we motivate future research in evaluation and classroom integration in the field of speech synthesis for language revitalization.", "doc_id": "251979591aa6c3ddee675fa35ee0b04c", "publication_year": 2022, "sentences": ["this paper describes the motivation and development of speech synthesis systems for the purposes of language revitalization .", "by building speech synthesis systems for three indigenous languages spoken in canada , kanien \u2019 keha , gitksan & [UNK] , we re - evaluate the question of how much data is required to build low - resource speech synthesis systems featuring state - of - the - art neural models .", "for example , preliminary results with english data show that a fastspeech2 model trained with 1 hour of training data can produce speech with comparable naturalness to a tacotron2 model trained with 10 hours of data .", "finally , we motivate future research in evaluation and classroom integration in the field of speech synthesis for language revitalization ."], "events": [{"event_type": "WKS", "arguments": [{"text": "language revitalization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "revitalization"], "offsets": [15, 16]}, {"text": "motivation of speech synthesis systems", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["motivation", "of", "speech", "synthesis", "systems"], "offsets": [4, 7, 8, 9, 10]}, {"text": "development of speech synthesis systems", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["development", "of", "speech", "synthesis", "systems"], "offsets": [6, 7, 8, 9, 10]}], "trigger": {"text": "describes", "tokens": ["describes"], "offsets": [2]}}, {"event_type": "MDS", "arguments": [{"text": "speech synthesis systems for three indigenous languages", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["speech", "synthesis", "systems", "for", "three", "indigenous", "languages"], "offsets": [20, 21, 22, 23, 24, 25, 26]}, {"text": "re - evaluate", 
"nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["re", "-", "evaluate"], "offsets": [40, 41, 42]}], "trigger": {"text": "building", "tokens": ["building"], "offsets": [19]}}, {"event_type": "PUR", "arguments": [{"text": "data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["data"], "offsets": [48]}, {"text": "required to build low - resource speech synthesis systems featuring state - of - the - art neural models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["required", "to", "build", "low", "-", "resource", "speech", "synthesis", "systems", "featuring", "state", "-", "of", "-", "the", "-", "art", "neural", "models"], "offsets": [50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68]}], "trigger": {"text": "re - evaluate", "tokens": ["re", "-", "evaluate"], "offsets": [40, 41, 42]}}, {"event_type": "FIN", "arguments": [{"text": "produce", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["produce"], "offsets": [91]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [78]}}, {"event_type": "CMP", "arguments": [{"text": "fastspeech2 model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["fastspeech2", "model"], "offsets": [81, 82]}, {"text": "trained with 1 hour of training data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["trained", "with", "1", "hour", "of", "training", "data"], "offsets": [83, 84, 85, 86, 87, 88, 89]}, {"text": "speech with comparable naturalness", "nugget_type": "STR", "argument_type": "Result", "tokens": ["speech", "with", "comparable", "naturalness"], "offsets": [92, 93, 94, 95]}, {"text": "tacotron2 model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["tacotron2", "model"], "offsets": [98, 99]}, {"text": "trained with 10 hours of data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["trained", "with", "10", "hours", "of", "data"], "offsets": [100, 101, 102, 103, 104, 105]}], "trigger": {"text": 
"produce", "tokens": ["produce"], "offsets": [91]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [109]}, {"text": "language revitalization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "revitalization"], "offsets": [125, 126]}, {"text": "future research in evaluation and classroom integration", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["future", "research", "in", "evaluation", "and", "classroom", "integration"], "offsets": [111, 112, 113, 114, 115, 116, 117]}, {"text": "in the field of speech synthesis", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "field", "of", "speech", "synthesis"], "offsets": [118, 119, 120, 121, 122, 123]}], "trigger": {"text": "motivate", "tokens": ["motivate"], "offsets": [110]}}], "document": ["this", "paper", "describes", "the", "motivation", "and", "development", "of", "speech", "synthesis", "systems", "for", "the", "purposes", "of", "language", "revitalization", ".", "by", "building", "speech", "synthesis", "systems", "for", "three", "indigenous", "languages", "spoken", "in", "canada", ",", "kanien", "\u2019", "keha", ",", "gitksan", "&", "[UNK]", ",", "we", "re", "-", "evaluate", "the", "question", "of", "how", "much", "data", "is", "required", "to", "build", "low", "-", "resource", "speech", "synthesis", "systems", "featuring", "state", "-", "of", "-", "the", "-", "art", "neural", "models", ".", "for", "example", ",", "preliminary", "results", "with", "english", "data", "show", "that", "a", "fastspeech2", "model", "trained", "with", "1", "hour", "of", "training", "data", "can", "produce", "speech", "with", "comparable", "naturalness", "to", "a", "tacotron2", "model", "trained", "with", "10", "hours", "of", "data", ".", "finally", ",", "we", "motivate", "future", "research", "in", "evaluation", "and", "classroom", "integration", "in", "the", "field", "of", "speech", 
"synthesis", "for", "language", "revitalization", "."]}, {"venue": "ACL", "title": "Can Prompt Probe Pretrained Language Models? Understanding the Invisible Risks from a Causal View", "abstract": "Prompt-based probing has been widely used in evaluating the abilities of pretrained language models (PLMs). Unfortunately, recent studies have discovered such an evaluation may be inaccurate, inconsistent and unreliable. Furthermore, the lack of understanding its inner workings, combined with its wide applicability, has the potential to lead to unforeseen risks for evaluating and applying PLMs in real-world applications. To discover, understand and quantify the risks, this paper investigates the prompt-based probing from a causal view, highlights three critical biases which could induce biased results and conclusions, and proposes to conduct debiasing via causal intervention. This paper provides valuable insights for the design of unbiased datasets, better probing frameworks and more reliable evaluations of pretrained language models. 
Furthermore, our conclusions also echo that we need to rethink the criteria for identifying better pretrained language models.", "doc_id": "894bd32580ef6fac20574b98fa21885f", "publication_year": 2022, "sentences": ["prompt - based probing has been widely used in evaluating the abilities of pretrained language models ( plms ) .", "unfortunately , recent studies have discovered such an evaluation may be inaccurate , inconsistent and unreliable .", "furthermore , the lack of understanding its inner workings , combined with its wide applicability , has the potential to lead to unforeseen risks for evaluating and applying plms in real - world applications .", "to discover , understand and quantify the risks , this paper investigates the prompt - based probing from a causal view , highlights three critical biases which could induce biased results and conclusions , and proposes to conduct debiasing via causal intervention .", "this paper provides valuable insights for the design of unbiased datasets , better probing frameworks and more reliable evaluations of pretrained language models .", "furthermore , our conclusions also echo that we need to rethink the criteria for identifying better pretrained language models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "prompt - based probing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["prompt", "-", "based", "probing"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "widely used", "tokens": ["widely", "used"], "offsets": [6, 7]}}, {"event_type": "RWF", "arguments": [{"text": "prompt - based probing", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["prompt", "-", "based", "probing"], "offsets": [0, 1, 2, 3]}, {"text": "inaccurate", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["inaccurate"], "offsets": [31]}, {"text": "inconsistent", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["inconsistent"], "offsets": [33]}, {"text": "unreliable", "nugget_type": "WEA", 
"argument_type": "Fault", "tokens": ["unreliable"], "offsets": [35]}], "trigger": {"text": "discovered", "tokens": ["discovered"], "offsets": [25]}}, {"event_type": "RWF", "arguments": [{"text": "prompt - based probing", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["prompt", "-", "based", "probing"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "lack of understanding its inner workings", "tokens": ["lack", "of", "understanding", "its", "inner", "workings"], "offsets": [40, 41, 42, 43, 44, 45]}}, {"event_type": "WKS", "arguments": [{"text": "prompt - based probing", "nugget_type": "APP", "argument_type": "Content", "tokens": ["prompt", "-", "based", "probing"], "offsets": [85, 86, 87, 88]}, {"text": "from a causal view", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "a", "causal", "view"], "offsets": [89, 90, 91, 92]}, {"text": "discover , understand and quantify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["discover", "understand", "and", "quantify"], "offsets": [73, 75, 76, 77]}], "trigger": {"text": "investigates", "tokens": ["investigates"], "offsets": [83]}}, {"event_type": "PUR", "arguments": [{"text": "risks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["risks"], "offsets": [79]}], "trigger": {"text": "discover , understand and quantify", "tokens": ["discover", ",", "understand", "and", "quantify"], "offsets": [73, 74, 75, 76, 77]}}, {"event_type": "WKS", "arguments": [{"text": "three critical biases", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["three", "critical", "biases"], "offsets": [95, 96, 97]}], "trigger": {"text": "highlights", "tokens": ["highlights"], "offsets": [94]}}, {"event_type": "WKS", "arguments": [{"text": "debiasing", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["debiasing"], "offsets": [110]}, {"text": "via causal intervention", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "causal", "intervention"], 
"offsets": [111, 112, 113]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [109]}}, {"event_type": "WKS", "arguments": [{"text": "valuable insights", "nugget_type": "STR", "argument_type": "Content", "tokens": ["valuable", "insights"], "offsets": [118, 119]}, {"text": "design of unbiased datasets", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["design", "of", "unbiased", "datasets"], "offsets": [122, 123, 124, 125]}, {"text": "better probing frameworks", "nugget_type": "STR", "argument_type": "Target", "tokens": ["better", "probing", "frameworks"], "offsets": [127, 128, 129]}, {"text": "more reliable evaluations of pretrained language models", "nugget_type": "STR", "argument_type": "Target", "tokens": ["more", "reliable", "evaluations", "of", "pretrained", "language", "models"], "offsets": [131, 132, 133, 134, 135, 136, 137]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [117]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [146]}, {"text": "criteria", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["criteria"], "offsets": [151]}, {"text": "identifying", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["identifying"], "offsets": [153]}], "trigger": {"text": "rethink", "tokens": ["rethink"], "offsets": [149]}}, {"event_type": "PUR", "arguments": [{"text": "better pretrained language models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["better", "pretrained", "language", "models"], "offsets": [154, 155, 156, 157]}], "trigger": {"text": "identifying", "tokens": ["identifying"], "offsets": [153]}}], "document": ["prompt", "-", "based", "probing", "has", "been", "widely", "used", "in", "evaluating", "the", "abilities", "of", "pretrained", "language", "models", "(", "plms", ")", ".", "unfortunately", ",", "recent", "studies", "have", "discovered", "such", "an", "evaluation", "may", "be", 
"inaccurate", ",", "inconsistent", "and", "unreliable", ".", "furthermore", ",", "the", "lack", "of", "understanding", "its", "inner", "workings", ",", "combined", "with", "its", "wide", "applicability", ",", "has", "the", "potential", "to", "lead", "to", "unforeseen", "risks", "for", "evaluating", "and", "applying", "plms", "in", "real", "-", "world", "applications", ".", "to", "discover", ",", "understand", "and", "quantify", "the", "risks", ",", "this", "paper", "investigates", "the", "prompt", "-", "based", "probing", "from", "a", "causal", "view", ",", "highlights", "three", "critical", "biases", "which", "could", "induce", "biased", "results", "and", "conclusions", ",", "and", "proposes", "to", "conduct", "debiasing", "via", "causal", "intervention", ".", "this", "paper", "provides", "valuable", "insights", "for", "the", "design", "of", "unbiased", "datasets", ",", "better", "probing", "frameworks", "and", "more", "reliable", "evaluations", "of", "pretrained", "language", "models", ".", "furthermore", ",", "our", "conclusions", "also", "echo", "that", "we", "need", "to", "rethink", "the", "criteria", "for", "identifying", "better", "pretrained", "language", "models", "."]}, {"venue": "ACL", "title": "QuoteR: A Benchmark of Quote Recommendation for Writing", "abstract": "It is very common to use quotations (quotes) to make our writings more elegant or convincing. To help people find appropriate quotes efficiently, the task of quote recommendation is presented, aiming to recommend quotes that fit the current context of writing. There have been various quote recommendation approaches, but they are evaluated on different unpublished datasets. To facilitate the research on this task, we build a large and fully open quote recommendation dataset called QuoteR, which comprises three parts including English, standard Chinese and classical Chinese. Any part of it is larger than previous unpublished counterparts. 
We conduct an extensive evaluation of existing quote recommendation methods on QuoteR. Furthermore, we propose a new quote recommendation model that significantly outperforms previous methods on all three parts of QuoteR. All the code and data of this paper can be obtained at https://github.com/thunlp/QuoteR.", "doc_id": "cb19c62b5d372b9a412d60664b0d3d5c", "publication_year": 2022, "sentences": ["it is very common to use quotations ( quotes ) to make our writings more elegant or convincing .", "to help people find appropriate quotes efficiently , the task of quote recommendation is presented , aiming to recommend quotes that fit the current context of writing .", "there have been various quote recommendation approaches , but they are evaluated on different unpublished datasets .", "to facilitate the research on this task , we build a large and fully open quote recommendation dataset called quoter , which comprises three parts including english , standard chinese and classical chinese .", "any part of it is larger than previous unpublished counterparts .", "we conduct an extensive evaluation of existing quote recommendation methods on quoter . furthermore , we propose a new quote recommendation model that significantly outperforms previous methods on all three parts of quoter . all the code and data of this paper can be obtained at https : / / github . 
com / thunlp / quoter ."], "events": [{"event_type": "ITT", "arguments": [{"text": "quotations", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["quotations"], "offsets": [6]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [5]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [72]}, {"text": "quote recommendation dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["quote", "recommendation", "dataset"], "offsets": [79, 80, 81]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [73]}}, {"event_type": "CMP", "arguments": [{"text": "larger", "nugget_type": "STR", "argument_type": "Result", "tokens": ["larger"], "offsets": [103]}, {"text": "previous unpublished counterparts", "nugget_type": "DST", "argument_type": "Arg2", "tokens": ["previous", "unpublished", "counterparts"], "offsets": [105, 106, 107]}], "trigger": {"text": "larger", "tokens": ["larger"], "offsets": [103]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [109]}, {"text": "extensive evaluation of existing quote recommendation methods", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["extensive", "evaluation", "of", "existing", "quote", "recommendation", "methods"], "offsets": [112, 113, 114, 115, 116, 117, 118]}, {"text": "quoter", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["quoter"], "offsets": [120]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [110]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [124]}, {"text": "quote recommendation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["quote", "recommendation", "model"], "offsets": [128, 129, 130]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [125]}}, 
{"event_type": "CMP", "arguments": [{"text": "quote recommendation model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["quote", "recommendation", "model"], "offsets": [128, 129, 130]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [132]}, {"text": "previous methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "methods"], "offsets": [134, 135]}, {"text": "all three parts of quoter", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["all", "three", "parts", "of", "quoter"], "offsets": [137, 138, 139, 140, 141]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [133]}}], "document": ["it", "is", "very", "common", "to", "use", "quotations", "(", "quotes", ")", "to", "make", "our", "writings", "more", "elegant", "or", "convincing", ".", "to", "help", "people", "find", "appropriate", "quotes", "efficiently", ",", "the", "task", "of", "quote", "recommendation", "is", "presented", ",", "aiming", "to", "recommend", "quotes", "that", "fit", "the", "current", "context", "of", "writing", ".", "there", "have", "been", "various", "quote", "recommendation", "approaches", ",", "but", "they", "are", "evaluated", "on", "different", "unpublished", "datasets", ".", "to", "facilitate", "the", "research", "on", "this", "task", ",", "we", "build", "a", "large", "and", "fully", "open", "quote", "recommendation", "dataset", "called", "quoter", ",", "which", "comprises", "three", "parts", "including", "english", ",", "standard", "chinese", "and", "classical", "chinese", ".", "any", "part", "of", "it", "is", "larger", "than", "previous", "unpublished", "counterparts", ".", "we", "conduct", "an", "extensive", "evaluation", "of", "existing", "quote", "recommendation", "methods", "on", "quoter", ".", "furthermore", ",", "we", "propose", "a", "new", "quote", "recommendation", "model", "that", "significantly", "outperforms", "previous", "methods", "on", 
"all", "three", "parts", "of", "quoter", ".", "all", "the", "code", "and", "data", "of", "this", "paper", "can", "be", "obtained", "at", "https", ":", "/", "/", "github", ".", "com", "/", "thunlp", "/", "quoter", "."]}, {"venue": "ACL", "title": "Learning Representations from Imperfect Time Series Data via Tensor Rank Regularization", "abstract": "There has been an increased interest in multimodal language processing including multimodal dialog, question answering, sentiment analysis, and speech recognition. However, naturally occurring multimodal data is often imperfect as a result of imperfect modalities, missing entries or noise corruption. To address these concerns, we present a regularization method based on tensor rank minimization. Our method is based on the observation that high-dimensional multimodal time series data often exhibit correlations across time and modalities which leads to low-rank tensor representations. However, the presence of noise or incomplete values breaks these correlations and results in tensor representations of higher rank. We design a model to learn such tensor representations and effectively regularize their rank. 
Experiments on multimodal language data show that our model achieves good results across various levels of imperfection.", "doc_id": "29967ec466d911b0500527c763e6d2fb", "publication_year": 2019, "sentences": ["there has been an increased interest in multimodal language processing including multimodal dialog , question answering , sentiment analysis , and speech recognition .", "however , naturally occurring multimodal data is often imperfect as a result of imperfect modalities , missing entries or noise corruption .", "to address these concerns , we present a regularization method based on tensor rank minimization .", "our method is based on the observation that high - dimensional multimodal time series data often exhibit correlations across time and modalities which leads to low - rank tensor representations .", "however , the presence of noise or incomplete values breaks these correlations and results in tensor representations of higher rank .", "we design a model to learn such tensor representations and effectively regularize their rank .", "experiments on multimodal language data show that our model achieves good results across various levels of imperfection ."], "events": [{"event_type": "ITT", "arguments": [{"text": "multimodal language processing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multimodal", "language", "processing"], "offsets": [7, 8, 9]}], "trigger": {"text": "increased", "tokens": ["increased"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "naturally occurring multimodal data", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["naturally", "occurring", "multimodal", "data"], "offsets": [26, 27, 28, 29]}, {"text": "imperfect", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["imperfect"], "offsets": [32]}, {"text": "imperfect modalities", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["imperfect", "modalities"], "offsets": [37, 38]}, {"text": "missing entries", "nugget_type": 
"WEA", "argument_type": "Fault", "tokens": ["missing", "entries"], "offsets": [40, 41]}], "trigger": {"text": "imperfect", "tokens": ["imperfect"], "offsets": [32]}}, {"event_type": "PRP", "arguments": [{"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [47]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [51]}, {"text": "regularization method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["regularization", "method"], "offsets": [54, 55]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [52]}}, {"event_type": "PUR", "arguments": [{"text": "imperfect modalities", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["imperfect", "modalities"], "offsets": [37, 38]}, {"text": "missing entries", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["missing", "entries"], "offsets": [40, 41]}, {"text": "noise corruption", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["noise", "corruption"], "offsets": [43, 44]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [47]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [114]}, {"text": "model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model"], "offsets": [117]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [119]}, {"text": "effectively regularize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["effectively", "regularize"], "offsets": [124, 125]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [115]}}, {"event_type": "PUR", "arguments": [{"text": "tensor representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["tensor", "representations"], "offsets": [121, 122]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [119]}}, {"event_type": 
"PUR", "arguments": [{"text": "rank", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["rank"], "offsets": [127]}], "trigger": {"text": "effectively regularize", "tokens": ["effectively", "regularize"], "offsets": [124, 125]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [138]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model"], "offsets": [117]}, {"text": "good results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["good", "results"], "offsets": [139, 140]}, {"text": "across various levels of imperfection", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "various", "levels", "of", "imperfection"], "offsets": [141, 142, 143, 144, 145]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [138]}}], "document": ["there", "has", "been", "an", "increased", "interest", "in", "multimodal", "language", "processing", "including", "multimodal", "dialog", ",", "question", "answering", ",", "sentiment", "analysis", ",", "and", "speech", "recognition", ".", "however", ",", "naturally", "occurring", "multimodal", "data", "is", "often", "imperfect", "as", "a", "result", "of", "imperfect", "modalities", ",", "missing", "entries", "or", "noise", "corruption", ".", "to", "address", "these", "concerns", ",", "we", "present", "a", "regularization", "method", "based", "on", "tensor", "rank", "minimization", ".", "our", "method", "is", "based", "on", "the", "observation", "that", "high", "-", "dimensional", "multimodal", "time", "series", "data", "often", "exhibit", "correlations", "across", "time", "and", "modalities", "which", "leads", "to", "low", "-", "rank", "tensor", "representations", ".", "however", ",", "the", "presence", "of", "noise", "or", "incomplete", "values", "breaks", 
"these", "correlations", "and", "results", "in", "tensor", "representations", "of", "higher", "rank", ".", "we", "design", "a", "model", "to", "learn", "such", "tensor", "representations", "and", "effectively", "regularize", "their", "rank", ".", "experiments", "on", "multimodal", "language", "data", "show", "that", "our", "model", "achieves", "good", "results", "across", "various", "levels", "of", "imperfection", "."]}, {"venue": "ACL", "title": "Bad Seeds: Evaluating Lexical Methods for Bias Measurement", "abstract": "A common factor in bias measurement methods is the use of hand-curated seed lexicons, but there remains little guidance for their selection. We gather seeds used in prior work, documenting their common sources and rationales, and in case studies of three English-language corpora, we enumerate the different types of social biases and linguistic features that, once encoded in the seeds, can affect subsequent bias measurements. Seeds developed in one context are often re-used in other contexts, but documentation and evaluation remain necessary precursors to relying on seeds for sensitive measurements.", "doc_id": "32ad1668a37769a7bff02ee61ba45194", "publication_year": 2021, "sentences": ["a common factor in bias measurement methods is the use of hand - curated seed lexicons , but there remains little guidance for their selection .", "we gather seeds used in prior work , documenting their common sources and rationales , and in case studies of three english - language corpora , we enumerate the different types of social biases and linguistic features that , once encoded in the seeds , can affect subsequent bias measurements .", "seeds developed in one context are often re - used in other contexts , but documentation and evaluation remain necessary precursors to relying on seeds for sensitive measurements ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": 
[26]}, {"text": "seeds", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["seeds"], "offsets": [28]}, {"text": "used in prior work", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["used", "in", "prior", "work"], "offsets": [29, 30, 31, 32]}], "trigger": {"text": "gather", "tokens": ["gather"], "offsets": [27]}}, {"event_type": "ITT", "arguments": [{"text": "hand - curated seed lexicons", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["hand", "-", "curated", "seed", "lexicons"], "offsets": [11, 12, 13, 14, 15]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [9]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [26]}, {"text": "common sources", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["common", "sources"], "offsets": [36, 37]}, {"text": "rationales", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["rationales"], "offsets": [39]}], "trigger": {"text": "documenting", "tokens": ["documenting"], "offsets": [34]}}, {"event_type": "WKS", "arguments": [{"text": "three english - language corpora", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["three", "english", "-", "language", "corpora"], "offsets": [46, 47, 48, 49, 50]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [52]}, {"text": "social biases", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["social", "biases"], "offsets": [58, 59]}, {"text": "linguistic features", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["linguistic", "features"], "offsets": [61, 62]}], "trigger": {"text": "enumerate", "tokens": ["enumerate"], "offsets": [53]}}, {"event_type": "FAC", "arguments": [{"text": "seeds for sensitive measurements", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["seeds", "for", "sensitive", "measurements"], "offsets": [101, 102, 103, 104]}, 
{"text": "documentation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["documentation"], "offsets": [92]}, {"text": "evaluation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["evaluation"], "offsets": [94]}], "trigger": {"text": "necessary", "tokens": ["necessary"], "offsets": [96]}}], "document": ["a", "common", "factor", "in", "bias", "measurement", "methods", "is", "the", "use", "of", "hand", "-", "curated", "seed", "lexicons", ",", "but", "there", "remains", "little", "guidance", "for", "their", "selection", ".", "we", "gather", "seeds", "used", "in", "prior", "work", ",", "documenting", "their", "common", "sources", "and", "rationales", ",", "and", "in", "case", "studies", "of", "three", "english", "-", "language", "corpora", ",", "we", "enumerate", "the", "different", "types", "of", "social", "biases", "and", "linguistic", "features", "that", ",", "once", "encoded", "in", "the", "seeds", ",", "can", "affect", "subsequent", "bias", "measurements", ".", "seeds", "developed", "in", "one", "context", "are", "often", "re", "-", "used", "in", "other", "contexts", ",", "but", "documentation", "and", "evaluation", "remain", "necessary", "precursors", "to", "relying", "on", "seeds", "for", "sensitive", "measurements", "."]}, {"venue": "ACL", "title": "Robust Lottery Tickets for Pre-trained Language Models", "abstract": "Recent works on Lottery Ticket Hypothesis have shown that pre-trained language models (PLMs) contain smaller matching subnetworks(winning tickets) which are capable of reaching accuracy comparable to the original models. However, these tickets are proved to be notrobust to adversarial examples, and even worse than their PLM counterparts. To address this problem, we propose a novel method based on learning binary weight masks to identify robust tickets hidden in the original PLMs. 
Since the loss is not differentiable for the binary mask, we assign the hard concrete distribution to the masks and encourage their sparsity using a smoothing approximation of L0 regularization.Furthermore, we design an adversarial loss objective to guide the search for robust tickets and ensure that the tickets perform well bothin accuracy and robustness. Experimental results show the significant improvement of the proposed method over previous work on adversarial robustness evaluation.", "doc_id": "e9eb5c60447eba8e392594330d2fd963", "publication_year": 2022, "sentences": ["recent works on lottery ticket hypothesis have shown that pre - trained language models ( plms ) contain smaller matching subnetworks ( winning tickets ) which are capable of reaching accuracy comparable to the original models .", "however , these tickets are proved to be notrobust to adversarial examples , and even worse than their plm counterparts .", "to address this problem , we propose a novel method based on learning binary weight masks to identify robust tickets hidden in the original plms .", "since the loss is not differentiable for the binary mask , we assign the hard concrete distribution to the masks and encourage their sparsity using a smoothing approximation of l0 regularization .", "furthermore , we design an adversarial loss objective to guide the search for robust tickets and ensure that the tickets perform well bothin accuracy and robustness .", "experimental results show the significant improvement of the proposed method over previous work on adversarial robustness evaluation ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["plms"], "offsets": [82]}], "trigger": {"text": "contain", "tokens": ["contain"], "offsets": [17]}}, {"event_type": "RWF", "arguments": [{"text": "tickets", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["tickets"], "offsets": [40]}, 
{"text": "notrobust", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["notrobust"], "offsets": [45]}], "trigger": {"text": "proved", "tokens": ["proved"], "offsets": [42]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [63]}, {"text": "method based on learning binary weight masks", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method", "based", "on", "learning", "binary", "weight", "masks"], "offsets": [67, 68, 69, 70, 71, 72, 73]}, {"text": "identify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["identify"], "offsets": [75]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [95]}, {"text": "hard concrete distribution", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["hard", "concrete", "distribution"], "offsets": [98, 99, 100]}, {"text": "to the masks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "the", "masks"], "offsets": [101, 102, 103]}], "trigger": {"text": "assign", "tokens": ["assign"], "offsets": [96]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [95]}, {"text": "encourage", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encourage"], "offsets": [105]}, {"text": "smoothing approximation of l0 regularization", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["smoothing", "approximation", "of", "l0", "regularization"], "offsets": [110, 111, 112, 113, 114]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [108]}}, {"event_type": "PUR", "arguments": [{"text": "sparsity", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["sparsity"], "offsets": [107]}], "trigger": {"text": "encourage", "tokens": 
["encourage"], "offsets": [105]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [118]}, {"text": "adversarial loss objective", "nugget_type": "APP", "argument_type": "Content", "tokens": ["adversarial", "loss", "objective"], "offsets": [121, 122, 123]}, {"text": "guide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["guide"], "offsets": [125]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [119]}}, {"event_type": "PUR", "arguments": [{"text": "search for robust tickets", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["search", "for", "robust", "tickets"], "offsets": [127, 128, 129, 130]}], "trigger": {"text": "guide", "tokens": ["guide"], "offsets": [125]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [118]}, {"text": "perform", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["perform"], "offsets": [136]}], "trigger": {"text": "ensure", "tokens": ["ensure"], "offsets": [132]}}, {"event_type": "FAC", "arguments": [{"text": "tickets", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["tickets"], "offsets": [135]}, {"text": "well", "nugget_type": "STR", "argument_type": "Object", "tokens": ["well"], "offsets": [137]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["accuracy"], "offsets": [139]}, {"text": "robustness", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["robustness"], "offsets": [141]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [136]}}, {"event_type": "CMP", "arguments": [{"text": "previous work", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "work"], "offsets": [154, 155]}, {"text": "adversarial robustness evaluation", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["adversarial", "robustness", "evaluation"], 
"offsets": [157, 158, 159]}, {"text": "significant improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significant", "improvement"], "offsets": [147, 148]}, {"text": "proposed method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["proposed", "method"], "offsets": [151, 152]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [145]}}, {"event_type": "RWF", "arguments": [{"text": "tickets", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["tickets"], "offsets": [40]}, {"text": "worse", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["worse"], "offsets": [52]}], "trigger": {"text": "worse", "tokens": ["worse"], "offsets": [52]}}, {"event_type": "PUR", "arguments": [{"text": "robust tickets", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["robust", "tickets"], "offsets": [76, 77]}, {"text": "in the original plms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "original", "plms"], "offsets": [79, 80, 81, 82]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [75]}}], "document": ["recent", "works", "on", "lottery", "ticket", "hypothesis", "have", "shown", "that", "pre", "-", "trained", "language", "models", "(", "plms", ")", "contain", "smaller", "matching", "subnetworks", "(", "winning", "tickets", ")", "which", "are", "capable", "of", "reaching", "accuracy", "comparable", "to", "the", "original", "models", ".", "however", ",", "these", "tickets", "are", "proved", "to", "be", "notrobust", "to", "adversarial", "examples", ",", "and", "even", "worse", "than", "their", "plm", "counterparts", ".", "to", "address", "this", "problem", ",", "we", "propose", "a", "novel", "method", "based", "on", "learning", "binary", "weight", "masks", "to", "identify", "robust", "tickets", "hidden", "in", "the", "original", "plms", ".", "since", "the", "loss", "is", "not", "differentiable", "for", "the", "binary", "mask", ",", "we", "assign", "the", "hard", 
"concrete", "distribution", "to", "the", "masks", "and", "encourage", "their", "sparsity", "using", "a", "smoothing", "approximation", "of", "l0", "regularization", ".", "furthermore", ",", "we", "design", "an", "adversarial", "loss", "objective", "to", "guide", "the", "search", "for", "robust", "tickets", "and", "ensure", "that", "the", "tickets", "perform", "well", "bothin", "accuracy", "and", "robustness", ".", "experimental", "results", "show", "the", "significant", "improvement", "of", "the", "proposed", "method", "over", "previous", "work", "on", "adversarial", "robustness", "evaluation", "."]}, {"venue": "ACL", "title": "What Should I Ask? Using Conversationally Informative Rewards for Goal-oriented Visual Dialog.", "abstract": "The ability to engage in goal-oriented conversations has allowed humans to gain knowledge, reduce uncertainty, and perform tasks more efficiently. Artificial agents, however, are still far behind humans in having goal-driven conversations. In this work, we focus on the task of goal-oriented visual dialogue, aiming to automatically generate a series of questions about an image with a single objective. This task is challenging since these questions must not only be consistent with a strategy to achieve a goal, but also consider the contextual information in the image. We propose an end-to-end goal-oriented visual dialogue system, that combines reinforcement learning with regularized information gain. Unlike previous approaches that have been proposed for the task, our work is motivated by the Rational Speech Act framework, which models the process of human inquiry to reach a goal. We test the two versions of our model on the GuessWhat?! 
dataset, obtaining significant results that outperform the current state-of-the-art models in the task of generating questions to find an undisclosed object in an image.", "doc_id": "eefa0939e592bdf3c6b18892b0198941", "publication_year": 2019, "sentences": ["the ability to engage in goal - oriented conversations has allowed humans to gain knowledge , reduce uncertainty , and perform tasks more efficiently .", "artificial agents , however , are still far behind humans in having goal - driven conversations .", "in this work , we focus on the task of goal - oriented visual dialogue , aiming to automatically generate a series of questions about an image with a single objective .", "this task is challenging since these questions must not only be consistent with a strategy to achieve a goal , but also consider the contextual information in the image .", "we propose an end - to - end goal - oriented visual dialogue system , that combines reinforcement learning with regularized information gain .", "unlike previous approaches that have been proposed for the task , our work is motivated by the rational speech act framework , which models the process of human inquiry to reach a goal .", "we test the two versions of our model on the guesswhat ? ! 
dataset , obtaining significant results that outperform the current state - of - the - art models in the task of generating questions to find an undisclosed object in an image ."], "events": [{"event_type": "ITT", "arguments": [{"text": "goal - oriented conversations", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["goal", "-", "oriented", "conversations"], "offsets": [5, 6, 7, 8]}], "trigger": {"text": "allowed", "tokens": ["allowed"], "offsets": [10]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [46]}, {"text": "task of goal - oriented visual dialogue", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["task", "of", "goal", "-", "oriented", "visual", "dialogue"], "offsets": [50, 51, 52, 53, 54, 55, 56]}, {"text": "automatically generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["automatically", "generate"], "offsets": [60, 61]}], "trigger": {"text": "focus", "tokens": ["focus"], "offsets": [47]}}, {"event_type": "PUR", "arguments": [{"text": "series of questions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["series", "of", "questions"], "offsets": [63, 64, 65]}, {"text": "about an image with a single objective", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["about", "an", "image", "with", "a", "single", "objective"], "offsets": [66, 67, 68, 69, 70, 71, 72]}], "trigger": {"text": "automatically generate", "tokens": ["automatically", "generate"], "offsets": [60, 61]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [104]}, {"text": "end - to - end goal - oriented visual dialogue system", "nugget_type": "APP", "argument_type": "Content", "tokens": ["end", "-", "to", "-", "end", "goal", "-", "oriented", "visual", "dialogue", "system"], "offsets": [107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117]}, {"text": "combines", 
"nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["combines"], "offsets": [120]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [105]}}, {"event_type": "PUR", "arguments": [{"text": "reinforcement learning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["reinforcement", "learning"], "offsets": [121, 122]}, {"text": "regularized information gain", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["regularized", "information", "gain"], "offsets": [124, 125, 126]}], "trigger": {"text": "combines", "tokens": ["combines"], "offsets": [120]}}, {"event_type": "WKS", "arguments": [{"text": "process of human inquiry to reach a goal", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["process", "of", "human", "inquiry", "to", "reach", "a", "goal"], "offsets": [153, 154, 155, 156, 157, 158, 159, 160]}], "trigger": {"text": "models", "tokens": ["models"], "offsets": [151]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [162]}, {"text": "guesswhat ? ! 
dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["guesswhat", "?", "!", "dataset"], "offsets": [172, 173, 174, 175]}, {"text": "two versions of our model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "versions", "of", "end", "-", "to", "-", "end", "goal", "-", "oriented", "visual", "dialogue", "system"], "offsets": [165, 166, 167, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117]}], "trigger": {"text": "test", "tokens": ["test"], "offsets": [163]}}, {"event_type": "FIN", "arguments": [{"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], "offsets": [181]}], "trigger": {"text": "obtaining", "tokens": ["obtaining"], "offsets": [177]}}, {"event_type": "CMP", "arguments": [{"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [181]}, {"text": "current state - of - the - art models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [183, 184, 185, 186, 187, 188, 189, 190, 191]}, {"text": "generating questions to find an undisclosed object", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["generating", "questions", "to", "find", "an", "undisclosed", "object"], "offsets": [196, 197, 198, 199, 200, 201, 202]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [181]}}], "document": ["the", "ability", "to", "engage", "in", "goal", "-", "oriented", "conversations", "has", "allowed", "humans", "to", "gain", "knowledge", ",", "reduce", "uncertainty", ",", "and", "perform", "tasks", "more", "efficiently", ".", "artificial", "agents", ",", "however", ",", "are", "still", "far", "behind", "humans", "in", "having", "goal", "-", "driven", "conversations", ".", "in", "this", "work", ",", "we", "focus", "on", "the", "task", "of", "goal", "-", "oriented", "visual", "dialogue", ",", "aiming", "to", "automatically", 
"generate", "a", "series", "of", "questions", "about", "an", "image", "with", "a", "single", "objective", ".", "this", "task", "is", "challenging", "since", "these", "questions", "must", "not", "only", "be", "consistent", "with", "a", "strategy", "to", "achieve", "a", "goal", ",", "but", "also", "consider", "the", "contextual", "information", "in", "the", "image", ".", "we", "propose", "an", "end", "-", "to", "-", "end", "goal", "-", "oriented", "visual", "dialogue", "system", ",", "that", "combines", "reinforcement", "learning", "with", "regularized", "information", "gain", ".", "unlike", "previous", "approaches", "that", "have", "been", "proposed", "for", "the", "task", ",", "our", "work", "is", "motivated", "by", "the", "rational", "speech", "act", "framework", ",", "which", "models", "the", "process", "of", "human", "inquiry", "to", "reach", "a", "goal", ".", "we", "test", "the", "two", "versions", "of", "our", "model", "on", "the", "guesswhat", "?", "!", "dataset", ",", "obtaining", "significant", "results", "that", "outperform", "the", "current", "state", "-", "of", "-", "the", "-", "art", "models", "in", "the", "task", "of", "generating", "questions", "to", "find", "an", "undisclosed", "object", "in", "an", "image", "."]}, {"venue": "ACL", "title": "The PhotoBook Dataset: Building Common Ground through Visually-Grounded Dialogue", "abstract": "This paper introduces the PhotoBook dataset, a large-scale collection of visually-grounded, task-oriented dialogues in English designed to investigate shared dialogue history accumulating during conversation. Taking inspiration from seminal work on dialogue analysis, we propose a data-collection task formulated as a collaborative game prompting two online participants to refer to images utilising both their visual context as well as previously established referring expressions. We provide a detailed description of the task setup and a thorough analysis of the 2,500 dialogues collected. 
To further illustrate the novel features of the dataset, we propose a baseline model for reference resolution which uses a simple method to take into account shared information accumulated in a reference chain. Our results show that this information is particularly important to resolve later descriptions and underline the need to develop more sophisticated models of common ground in dialogue interaction.", "doc_id": "d9122d41e15c6ca4a515e927762286a2", "publication_year": 2019, "sentences": ["this paper introduces the photobook dataset , a large - scale collection of visually - grounded , task - oriented dialogues in english designed to investigate shared dialogue history accumulating during conversation .", "taking inspiration from seminal work on dialogue analysis , we propose a data - collection task formulated as a collaborative game prompting two online participants to refer to images utilising both their visual context as well as previously established referring expressions .", "we provide a detailed description of the task setup and a thorough analysis of the 2 , 500 dialogues collected .", "to further illustrate the novel features of the dataset , we propose a baseline model for reference resolution which uses a simple method to take into account shared information accumulated in a reference chain .", "our results show that this information is particularly important to resolve later descriptions and underline the need to develop more sophisticated models of common ground in dialogue interaction ."], "events": [{"event_type": "PRP", "arguments": [{"text": "photobook dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["photobook", "dataset"], "offsets": [4, 5]}, {"text": "investigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["investigate"], "offsets": [25]}], "trigger": {"text": "introduces", "tokens": ["introduces"], "offsets": [2]}}, {"event_type": "PUR", "arguments": [{"text": "shared dialogue history", 
"nugget_type": "TAK", "argument_type": "Aim", "tokens": ["shared", "dialogue", "history"], "offsets": [26, 27, 28]}, {"text": "during conversation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "conversation"], "offsets": [30, 31]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [25]}}, {"event_type": "PRP", "arguments": [{"text": "data - collection task", "nugget_type": "APP", "argument_type": "Content", "tokens": ["data", "-", "collection", "task"], "offsets": [45, 46, 47, 48]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [42]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [43]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [75]}, {"text": "detailed description of the task setup", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["detailed", "description", "of", "the", "task", "setup"], "offsets": [78, 79, 80, 81, 82, 83]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [76]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [75]}, {"text": "2 , 500 dialogues collected", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["2", ",", "500", "dialogues", "collected"], "offsets": [90, 91, 92, 93, 94]}], "trigger": {"text": "analysis", "tokens": ["analysis"], "offsets": [87]}}, {"event_type": "PRP", "arguments": [{"text": "further illustrate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["further", "illustrate"], "offsets": [97, 98]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [106]}, {"text": "baseline model for reference resolution", "nugget_type": "APP", "argument_type": "Content", "tokens": ["baseline", "model", "for", "reference", "resolution"], "offsets": [109, 
110, 111, 112, 113]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [107]}}, {"event_type": "PUR", "arguments": [{"text": "features of the dataset", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["features", "of", "the", "dataset"], "offsets": [101, 102, 103, 104]}], "trigger": {"text": "further illustrate", "tokens": ["further", "illustrate"], "offsets": [97, 98]}}, {"event_type": "FIN", "arguments": [{"text": "important", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["important"], "offsets": [139]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [133]}}, {"event_type": "FAC", "arguments": [{"text": "shared information", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["shared", "information"], "offsets": [123, 124]}, {"text": "resolve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["resolve"], "offsets": [141]}], "trigger": {"text": "important", "tokens": ["important"], "offsets": [139]}}, {"event_type": "PUR", "arguments": [{"text": "later descriptions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["later", "descriptions"], "offsets": [142, 143]}], "trigger": {"text": "resolve", "tokens": ["resolve"], "offsets": [141]}}, {"event_type": "FAC", "arguments": [{"text": "need", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["need"], "offsets": [147]}, {"text": "develop", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["develop"], "offsets": [149]}], "trigger": {"text": "underline", "tokens": ["underline"], "offsets": [145]}}, {"event_type": "PUR", "arguments": [{"text": "more sophisticated models of common ground", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["more", "sophisticated", "models", "of", "common", "ground"], "offsets": [150, 151, 152, 153, 154, 155]}, {"text": "in dialogue interaction", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "dialogue", "interaction"], "offsets": [156, 157, 158]}], 
"trigger": {"text": "develop", "tokens": ["develop"], "offsets": [149]}}, {"event_type": "MDS", "arguments": [{"text": "visual context", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["visual", "context"], "offsets": [65, 66]}, {"text": "previously established referring expressions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["previously", "established", "referring", "expressions"], "offsets": [70, 71, 72, 73]}, {"text": "refer", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["refer"], "offsets": [59]}], "trigger": {"text": "utilising", "tokens": ["utilising"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "images", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["images"], "offsets": [61]}], "trigger": {"text": "refer", "tokens": ["refer"], "offsets": [59]}}, {"event_type": "MDS", "arguments": [{"text": "shared information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["shared", "information"], "offsets": [123, 124]}, {"text": "reference chain", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["reference", "chain"], "offsets": [128, 129]}], "trigger": {"text": "accumulated", "tokens": ["accumulated"], "offsets": [125]}}], "document": ["this", "paper", "introduces", "the", "photobook", "dataset", ",", "a", "large", "-", "scale", "collection", "of", "visually", "-", "grounded", ",", "task", "-", "oriented", "dialogues", "in", "english", "designed", "to", "investigate", "shared", "dialogue", "history", "accumulating", "during", "conversation", ".", "taking", "inspiration", "from", "seminal", "work", "on", "dialogue", "analysis", ",", "we", "propose", "a", "data", "-", "collection", "task", "formulated", "as", "a", "collaborative", "game", "prompting", "two", "online", "participants", "to", "refer", "to", "images", "utilising", "both", "their", "visual", "context", "as", "well", "as", "previously", "established", "referring", "expressions", 
".", "we", "provide", "a", "detailed", "description", "of", "the", "task", "setup", "and", "a", "thorough", "analysis", "of", "the", "2", ",", "500", "dialogues", "collected", ".", "to", "further", "illustrate", "the", "novel", "features", "of", "the", "dataset", ",", "we", "propose", "a", "baseline", "model", "for", "reference", "resolution", "which", "uses", "a", "simple", "method", "to", "take", "into", "account", "shared", "information", "accumulated", "in", "a", "reference", "chain", ".", "our", "results", "show", "that", "this", "information", "is", "particularly", "important", "to", "resolve", "later", "descriptions", "and", "underline", "the", "need", "to", "develop", "more", "sophisticated", "models", "of", "common", "ground", "in", "dialogue", "interaction", "."]}, {"venue": "ACL", "title": "The Paradox of the Compositionality of Natural Language: A Neural Machine Translation Case Study", "abstract": "Obtaining human-like performance in NLP is often argued to require compositional generalisation. Whether neural networks exhibit this ability is usually studied by training models on highly compositional synthetic data. However, compositionality in natural language is much more complex than the rigid, arithmetic-like version such data adheres to, and artificial compositionality tests thus do not allow us to determine how neural models deal with more realistic forms of compositionality. In this work, we re-instantiate three compositionality tests from the literature and reformulate them for neural machine translation (NMT).Our results highlight that: i) unfavourably, models trained on more data are more compositional; ii) models are sometimes less compositional than expected, but sometimes more, exemplifying that different levels of compositionality are required, and models are not always able to modulate between them correctly; iii) some of the non-compositional behaviours are mistakes, whereas others reflect the natural variation in data. 
Apart from an empirical study, our work is a call to action: we should rethink the evaluation of compositionality in neural networks and develop benchmarks using real data to evaluate compositionality on natural language, where composing meaning is not as straightforward as doing the math.", "doc_id": "3c59c61e61363113b3692b57e1aab7ae", "publication_year": 2022, "sentences": ["obtaining human - like performance in nlp is often argued to require compositional generalisation .", "whether neural networks exhibit this ability is usually studied by training models on highly compositional synthetic data .", "however , compositionality in natural language is much more complex than the rigid , arithmetic - like version such data adheres to , and artificial compositionality tests thus do not allow us to determine how neural models deal with more realistic forms of compositionality .", "in this work , we re - instantiate three compositionality tests from the literature and reformulate them for neural machine translation ( nmt ) .", "our results highlight that : i ) unfavourably , models trained on more data are more compositional ; ii ) models are sometimes less compositional than expected , but sometimes more , exemplifying that different levels of compositionality are required , and models are not always able to modulate between them correctly ; iii ) some of the non - compositional behaviours are mistakes , whereas others reflect the natural variation in data .", "apart from an empirical study , our work is a call to action : we should rethink the evaluation of compositionality in neural networks and develop benchmarks using real data to evaluate compositionality on natural language , where composing meaning is not as straightforward as doing the math ."], "events": [{"event_type": "ITT", "arguments": [{"text": "compositional generalisation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["compositional", "generalisation"], "offsets": [12, 13]}], "trigger": 
{"text": "require", "tokens": ["require"], "offsets": [11]}}, {"event_type": "RWS", "arguments": [{"text": "highly compositional synthetic data", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["highly", "compositional", "synthetic", "data"], "offsets": [28, 29, 30, 31]}, {"text": "models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["models"], "offsets": [26]}, {"text": "neural networks", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["neural", "networks"], "offsets": [16, 17]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [25]}}, {"event_type": "RWF", "arguments": [{"text": "artificial compositionality tests", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["artificial", "compositionality", "tests"], "offsets": [57, 58, 59]}, {"text": "more realistic forms", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["more", "realistic", "forms"], "offsets": [72, 73, 74]}], "trigger": {"text": "not allow", "tokens": ["not", "allow"], "offsets": [62, 63]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [82]}, {"text": "three compositionality tests", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["three", "compositionality", "tests"], "offsets": [86, 87, 88]}, {"text": "neural machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["neural", "machine", "translation"], "offsets": [96, 97, 98]}], "trigger": {"text": "re - instantiate", "tokens": ["re", "-", "instantiate"], "offsets": [83, 84, 85]}}, {"event_type": "CMP", "arguments": [{"text": "models trained on more data", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["models", "trained", "on", "more", "data"], "offsets": [112, 113, 114, 115, 116]}], "trigger": {"text": "more compositional", "tokens": ["more", "compositional"], "offsets": [118, 119]}}, {"event_type": "FAC", "arguments": 
[{"text": "different levels of compositionality", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["different", "levels", "of", "compositionality"], "offsets": [137, 138, 139, 140]}], "trigger": {"text": "required", "tokens": ["required"], "offsets": [142]}}, {"event_type": "FAC", "arguments": [{"text": "non - compositional behaviours", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["non", "-", "compositional", "behaviours"], "offsets": [161, 162, 163, 164]}], "trigger": {"text": "mistakes", "tokens": ["mistakes"], "offsets": [166]}}, {"event_type": "FAC", "arguments": [{"text": "others", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["non", "-", "compositional", "behaviours"], "offsets": [161, 162, 163, 164]}, {"text": "natural variation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["natural", "variation"], "offsets": [172, 173]}], "trigger": {"text": "reflect", "tokens": ["reflect"], "offsets": [170]}}], "document": ["obtaining", "human", "-", "like", "performance", "in", "nlp", "is", "often", "argued", "to", "require", "compositional", "generalisation", ".", "whether", "neural", "networks", "exhibit", "this", "ability", "is", "usually", "studied", "by", "training", "models", "on", "highly", "compositional", "synthetic", "data", ".", "however", ",", "compositionality", "in", "natural", "language", "is", "much", "more", "complex", "than", "the", "rigid", ",", "arithmetic", "-", "like", "version", "such", "data", "adheres", "to", ",", "and", "artificial", "compositionality", "tests", "thus", "do", "not", "allow", "us", "to", "determine", "how", "neural", "models", "deal", "with", "more", "realistic", "forms", "of", "compositionality", ".", "in", "this", "work", ",", "we", "re", "-", "instantiate", "three", "compositionality", "tests", "from", "the", "literature", "and", "reformulate", "them", "for", "neural", "machine", "translation", "(", "nmt", ")", ".", "our", "results", "highlight", "that", ":", "i", ")", 
"unfavourably", ",", "models", "trained", "on", "more", "data", "are", "more", "compositional", ";", "ii", ")", "models", "are", "sometimes", "less", "compositional", "than", "expected", ",", "but", "sometimes", "more", ",", "exemplifying", "that", "different", "levels", "of", "compositionality", "are", "required", ",", "and", "models", "are", "not", "always", "able", "to", "modulate", "between", "them", "correctly", ";", "iii", ")", "some", "of", "the", "non", "-", "compositional", "behaviours", "are", "mistakes", ",", "whereas", "others", "reflect", "the", "natural", "variation", "in", "data", ".", "apart", "from", "an", "empirical", "study", ",", "our", "work", "is", "a", "call", "to", "action", ":", "we", "should", "rethink", "the", "evaluation", "of", "compositionality", "in", "neural", "networks", "and", "develop", "benchmarks", "using", "real", "data", "to", "evaluate", "compositionality", "on", "natural", "language", ",", "where", "composing", "meaning", "is", "not", "as", "straightforward", "as", "doing", "the", "math", "."]}, {"venue": "ACL", "title": "The Curse of Dense Low-Dimensional Information Retrieval for Large Index Sizes", "abstract": "Information Retrieval using dense low-dimensional representations recently became popular and showed out-performance to traditional sparse-representations like BM25. However, no previous work investigated how dense representations perform with large index sizes. We show theoretically and empirically that the performance for dense representations decreases quicker than sparse representations for increasing index sizes. In extreme cases, this can even lead to a tipping point where at a certain index size sparse representations outperform dense representations. We show that this behavior is tightly connected to the number of dimensions of the representations: The lower the dimension, the higher the chance for false positives, i.e. 
returning irrelevant documents", "doc_id": "ee6b074ace5c90f93849c39d094a8fd8", "publication_year": 2021, "sentences": ["information retrieval using dense low - dimensional representations recently became popular and showed out - performance to traditional sparse - representations like bm25 .", "however , no previous work investigated how dense representations perform with large index sizes .", "we show theoretically and empirically that the performance for dense representations decreases quicker than sparse representations for increasing index sizes .", "in extreme cases , this can even lead to a tipping point where at a certain index size sparse representations outperform dense representations .", "we show that this behavior is tightly connected to the number of dimensions of the representations : the lower the dimension , the higher the chance for false positives , i . e . returning irrelevant documents"], "events": [{"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [39]}, {"text": "decreases", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["decreases"], "offsets": [50]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [40]}}, {"event_type": "CMP", "arguments": [{"text": "for increasing index sizes", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "increasing", "index", "sizes"], "offsets": [55, 56, 57, 58]}, {"text": "quicker", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["quicker"], "offsets": [51]}, {"text": "dense representations", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["dense", "representations"], "offsets": [48, 49]}, {"text": "sparse representations", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["sparse", "representations"], "offsets": [53, 54]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [46]}], "trigger": {"text": 
"decreases", "tokens": ["decreases"], "offsets": [50]}}, {"event_type": "CMP", "arguments": [{"text": "at a certain index size", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "a", "certain", "index", "size"], "offsets": [73, 74, 75, 76, 77]}, {"text": "sparse representations", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["sparse", "representations"], "offsets": [78, 79]}, {"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [80]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [80]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [84]}, {"text": "tightly connected", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["tightly", "connected"], "offsets": [90, 91]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [85]}}, {"event_type": "FAC", "arguments": [{"text": "behavior", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["behavior"], "offsets": [88]}, {"text": "number of dimensions of the representations", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["number", "of", "dimensions", "of", "the", "representations"], "offsets": [94, 95, 96, 97, 98, 99]}], "trigger": {"text": "tightly connected", "tokens": ["tightly", "connected"], "offsets": [90, 91]}}, {"event_type": "ITT", "arguments": [{"text": "information retrieval", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["information", "retrieval"], "offsets": [0, 1]}], "trigger": {"text": "became", "tokens": ["became"], "offsets": [9]}}], "document": ["information", "retrieval", "using", "dense", "low", "-", "dimensional", "representations", "recently", "became", "popular", "and", "showed", "out", "-", "performance", "to", "traditional", "sparse", "-", "representations", "like", "bm25", ".", "however", ",", "no", "previous", "work", "investigated", "how", "dense", 
"representations", "perform", "with", "large", "index", "sizes", ".", "we", "show", "theoretically", "and", "empirically", "that", "the", "performance", "for", "dense", "representations", "decreases", "quicker", "than", "sparse", "representations", "for", "increasing", "index", "sizes", ".", "in", "extreme", "cases", ",", "this", "can", "even", "lead", "to", "a", "tipping", "point", "where", "at", "a", "certain", "index", "size", "sparse", "representations", "outperform", "dense", "representations", ".", "we", "show", "that", "this", "behavior", "is", "tightly", "connected", "to", "the", "number", "of", "dimensions", "of", "the", "representations", ":", "the", "lower", "the", "dimension", ",", "the", "higher", "the", "chance", "for", "false", "positives", ",", "i", ".", "e", ".", "returning", "irrelevant", "documents"]}, {"venue": "ACL", "title": "Zero-Shot Dependency Parsing with Worst-Case Aware Automated Curriculum Learning", "abstract": "Large multilingual pretrained language models such as mBERT and XLM-RoBERTa have been found to be surprisingly effective for cross-lingual transfer of syntactic parsing models Wu and Dredze (2019), but only between related languages. However, source and training languages are rarely related, when parsing truly low-resource languages. To close this gap, we adopt a method from multi-task learning, which relies on automated curriculum learning, to dynamically optimize for parsing performance on outlier languages. 
We show that this approach is significantly better than uniform and size-proportional sampling in the zero-shot setting.", "doc_id": "56edf655519dca664eaf06f5c6534901", "publication_year": 2022, "sentences": ["large multilingual pretrained language models such as mbert and xlm - roberta have been found to be surprisingly effective for cross - lingual transfer of syntactic parsing models wu and dredze ( 2019 ) , but only between related languages .", "however , source and training languages are rarely related , when parsing truly low - resource languages .", "to close this gap , we adopt a method from multi - task learning , which relies on automated curriculum learning , to dynamically optimize for parsing performance on outlier languages .", "we show that this approach is significantly better than uniform and size - proportional sampling in the zero - shot setting ."], "events": [{"event_type": "ITT", "arguments": [{"text": "large multilingual pretrained language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["large", "multilingual", "pretrained", "language", "models"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "found", "tokens": ["found"], "offsets": [14]}}, {"event_type": "RWF", "arguments": [{"text": "rarely related", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["rarely", "related"], "offsets": [48, 49]}, {"text": "when parsing truly low - resource languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "parsing", "truly", "low", "-", "resource", "languages"], "offsets": [51, 52, 53, 54, 55, 56, 57]}, {"text": "source languages", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["source", "languages"], "offsets": [43, 46]}], "trigger": {"text": "rarely related", "tokens": ["rarely", "related"], "offsets": [48, 49]}}, {"event_type": "PUR", "arguments": [{"text": "performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance"], "offsets": [86]}, {"text": "on 
outlier languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "outlier", "languages"], "offsets": [87, 88, 89]}], "trigger": {"text": "parsing", "tokens": ["parsing"], "offsets": [85]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [91]}, {"text": "better", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["better"], "offsets": [98]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [92]}}, {"event_type": "CMP", "arguments": [{"text": "uniform and size - proportional sampling", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["uniform", "and", "size", "-", "proportional", "sampling"], "offsets": [100, 101, 102, 103, 104, 105]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["approach"], "offsets": [95]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [98]}, {"text": "in the zero - shot setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "zero", "-", "shot", "setting"], "offsets": [106, 107, 108, 109, 110, 111]}], "trigger": {"text": "better", "tokens": ["better"], "offsets": [98]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [64]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [67]}, {"text": "from multi - task learning", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "multi", "-", "task", "learning"], "offsets": [68, 69, 70, 71, 72]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [65]}}, {"event_type": "WKS", "arguments": [{"text": "automated curriculum learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["automated", "curriculum", "learning"], "offsets": [77, 78, 79]}, {"text": "parsing", "nugget_type": "E-PUR", "argument_type": "Target", 
"tokens": ["parsing"], "offsets": [85]}], "trigger": {"text": "relies on", "tokens": ["relies", "on"], "offsets": [75, 76]}}], "document": ["large", "multilingual", "pretrained", "language", "models", "such", "as", "mbert", "and", "xlm", "-", "roberta", "have", "been", "found", "to", "be", "surprisingly", "effective", "for", "cross", "-", "lingual", "transfer", "of", "syntactic", "parsing", "models", "wu", "and", "dredze", "(", "2019", ")", ",", "but", "only", "between", "related", "languages", ".", "however", ",", "source", "and", "training", "languages", "are", "rarely", "related", ",", "when", "parsing", "truly", "low", "-", "resource", "languages", ".", "to", "close", "this", "gap", ",", "we", "adopt", "a", "method", "from", "multi", "-", "task", "learning", ",", "which", "relies", "on", "automated", "curriculum", "learning", ",", "to", "dynamically", "optimize", "for", "parsing", "performance", "on", "outlier", "languages", ".", "we", "show", "that", "this", "approach", "is", "significantly", "better", "than", "uniform", "and", "size", "-", "proportional", "sampling", "in", "the", "zero", "-", "shot", "setting", "."]}, {"venue": "ACL", "title": "Modeling Affirmative and Negated Action Processing in the Brain with Lexical and Compositional Semantic Models", "abstract": "Recent work shows that distributional semantic models can be used to decode patterns of brain activity associated with individual words and sentence meanings. However, it is yet unclear to what extent such models can be used to study and decode fMRI patterns associated with specific aspects of semantic composition such as the negation function. In this paper, we apply lexical and compositional semantic models to decode fMRI patterns associated with negated and affirmative sentences containing hand-action verbs. 
Our results show reduced decoding (correlation) of sentences where the verb is in the negated context, as compared to the affirmative one, within brain regions implicated in action-semantic processing. This supports behavioral and brain imaging studies, suggesting that negation involves reduced access to aspects of the affirmative mental representation. The results pave the way for testing alternate semantic models of negation against human semantic processing in the brain.", "doc_id": "74bac5d958a9bb15e9b965a980df68c5", "publication_year": 2019, "sentences": ["recent work shows that distributional semantic models can be used to decode patterns of brain activity associated with individual words and sentence meanings .", "however , it is yet unclear to what extent such models can be used to study and decode fmri patterns associated with specific aspects of semantic composition such as the negation function .", "in this paper , we apply lexical and compositional semantic models to decode fmri patterns associated with negated and affirmative sentences containing hand - action verbs .", "our results show reduced decoding ( correlation ) of sentences where the verb is in the negated context , as compared to the affirmative one , within brain regions implicated in action - semantic processing .", "this supports behavioral and brain imaging studies , suggesting that negation involves reduced access to aspects of the affirmative mental representation .", "the results pave the way for testing alternate semantic models of negation against human semantic processing in the brain ."], "events": [{"event_type": "ITT", "arguments": [{"text": "distributional semantic models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["distributional", "semantic", "models"], "offsets": [4, 5, 6]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [9]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": 
["we"], "offsets": [61]}, {"text": "decode", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["decode"], "offsets": [69]}, {"text": "lexical models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["lexical", "models"], "offsets": [63, 67]}, {"text": "compositional semantic models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["compositional", "semantic", "models"], "offsets": [65, 66, 67]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "associated with negated and affirmative sentences containing hand - action verbs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["associated", "with", "negated", "and", "affirmative", "sentences", "containing", "hand", "-", "action", "verbs"], "offsets": [72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82]}, {"text": "fmri patterns", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["fmri", "patterns"], "offsets": [70, 71]}], "trigger": {"text": "decode", "tokens": ["decode"], "offsets": [69]}}, {"event_type": "CMP", "arguments": [{"text": "decoding", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["decoding"], "offsets": [88]}, {"text": "affirmative one", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["affirmative", "one"], "offsets": [107, 108]}, {"text": "within brain regions implicated in action - semantic processing", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["within", "brain", "regions", "implicated", "in", "action", "-", "semantic", "processing"], "offsets": [110, 111, 112, 113, 114, 115, 116, 117, 118]}, {"text": "sentences", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["sentences"], "offsets": [93]}, {"text": "where the verb is in the negated context", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["where", "the", "verb", "is", "in", "the", "negated", "context"], "offsets": [94, 95, 96, 97, 98, 99, 100, 101]}, {"text": 
"reduced", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["reduced"], "offsets": [87]}], "trigger": {"text": "compared", "tokens": ["compared"], "offsets": [104]}}, {"event_type": "FAC", "arguments": [{"text": "negation", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["negation"], "offsets": [130]}, {"text": "reduced access to aspects of the affirmative mental representation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["reduced", "access", "to", "aspects", "of", "the", "affirmative", "mental", "representation"], "offsets": [132, 133, 134, 135, 136, 137, 138, 139, 140]}], "trigger": {"text": "involves", "tokens": ["involves"], "offsets": [131]}}], "document": ["recent", "work", "shows", "that", "distributional", "semantic", "models", "can", "be", "used", "to", "decode", "patterns", "of", "brain", "activity", "associated", "with", "individual", "words", "and", "sentence", "meanings", ".", "however", ",", "it", "is", "yet", "unclear", "to", "what", "extent", "such", "models", "can", "be", "used", "to", "study", "and", "decode", "fmri", "patterns", "associated", "with", "specific", "aspects", "of", "semantic", "composition", "such", "as", "the", "negation", "function", ".", "in", "this", "paper", ",", "we", "apply", "lexical", "and", "compositional", "semantic", "models", "to", "decode", "fmri", "patterns", "associated", "with", "negated", "and", "affirmative", "sentences", "containing", "hand", "-", "action", "verbs", ".", "our", "results", "show", "reduced", "decoding", "(", "correlation", ")", "of", "sentences", "where", "the", "verb", "is", "in", "the", "negated", "context", ",", "as", "compared", "to", "the", "affirmative", "one", ",", "within", "brain", "regions", "implicated", "in", "action", "-", "semantic", "processing", ".", "this", "supports", "behavioral", "and", "brain", "imaging", "studies", ",", "suggesting", "that", "negation", "involves", "reduced", "access", "to", "aspects", "of", "the", "affirmative", 
"mental", "representation", ".", "the", "results", "pave", "the", "way", "for", "testing", "alternate", "semantic", "models", "of", "negation", "against", "human", "semantic", "processing", "in", "the", "brain", "."]}, {"venue": "ACL", "title": "BRIO: Bringing Order to Abstractive Summarization", "abstract": "Abstractive summarization models are commonly trained using maximum likelihood estimation, which assumes a deterministic (one-point) target distribution in which an ideal model will assign all the probability mass to the reference summary. This assumption may lead to performance degradation during inference, where the model needs to compare several system-generated (candidate) summaries that have deviated from the reference summary. To address this problem, we propose a novel training paradigm which assumes a non-deterministic distribution so that different candidate summaries are assigned probability mass according to their quality. Our method achieves a new state-of-the-art result on the CNN/DailyMail (47.78 ROUGE-1) and XSum (49.07 ROUGE-1) datasets. 
Further analysis also shows that our model can estimate probabilities of candidate summaries that are more correlated with their level of quality.", "doc_id": "4ec9b21362b7ac3d3af29be7abd2974d", "publication_year": 2022, "sentences": ["abstractive summarization models are commonly trained using maximum likelihood estimation , which assumes a deterministic ( one - point ) target distribution in which an ideal model will assign all the probability mass to the reference summary .", "this assumption may lead to performance degradation during inference , where the model needs to compare several system - generated ( candidate ) summaries that have deviated from the reference summary .", "to address this problem , we propose a novel training paradigm which assumes a non - deterministic distribution so that different candidate summaries are assigned probability mass according to their quality .", "our method achieves a new state - of - the - art result on the cnn / dailymail ( 47 . 78 rouge - 1 ) and xsum ( 49 . 
07 rouge - 1 ) datasets .", "further analysis also shows that our model can estimate probabilities of candidate summaries that are more correlated with their level of quality ."], "events": [{"event_type": "ITT", "arguments": [{"text": "abstractive summarization models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["abstractive", "summarization", "models"], "offsets": [0, 1, 2]}, {"text": "using maximum likelihood estimation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "maximum", "likelihood", "estimation"], "offsets": [6, 7, 8, 9]}], "trigger": {"text": "commonly trained", "tokens": ["commonly", "trained"], "offsets": [4, 5]}}, {"event_type": "RWF", "arguments": [{"text": "performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance"], "offsets": [43]}, {"text": "degradation", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["degradation"], "offsets": [44]}, {"text": "during inference", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "inference"], "offsets": [45, 46]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [41]}}, {"event_type": "RWF", "arguments": [{"text": "system - generated ( candidate ) summaries", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["system", "-", "generated", "(", "candidate", ")", "summaries"], "offsets": [55, 56, 57, 58, 59, 60, 61]}], "trigger": {"text": "deviated", "tokens": ["deviated"], "offsets": [64]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [75]}, {"text": "training paradigm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["training", "paradigm"], "offsets": [79, 80]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [71]}, {"text": "assumes", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["assumes"], "offsets": [82]}], 
"trigger": {"text": "propose", "tokens": ["propose"], "offsets": [76]}}, {"event_type": "PUR", "arguments": [{"text": "this assumption may lead to performance degradation during inference", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["this", "assumption", "may", "lead", "to", "performance", "degradation", "during", "inference"], "offsets": [38, 39, 40, 41, 42, 43, 44, 45, 46]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [71]}}, {"event_type": "PUR", "arguments": [{"text": "non - deterministic distribution", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["non", "-", "deterministic", "distribution"], "offsets": [84, 85, 86, 87]}], "trigger": {"text": "assumes", "tokens": ["assumes"], "offsets": [82]}}, {"event_type": "MDS", "arguments": [{"text": "different candidate summaries", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["different", "candidate", "summaries"], "offsets": [90, 91, 92]}, {"text": "according to their quality", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["according", "to", "different", "candidate", "summaries", "quality"], "offsets": [97, 98, 90, 91, 92, 100]}, {"text": "probability mass", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["probability", "mass"], "offsets": [95, 96]}], "trigger": {"text": "assigned", "tokens": ["assigned"], "offsets": [94]}}, {"event_type": "CMP", "arguments": [{"text": "training paradigm", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["training", "paradigm"], "offsets": [79, 80]}, {"text": "new state - of - the - art", "nugget_type": "STR", "argument_type": "Result", "tokens": ["new", "state", "-", "of", "-", "the", "-", "art"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113]}, {"text": "result", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["result"], "offsets": [114]}, {"text": "cnn / dailymail ( 47 . 
78 rouge - 1 ) datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["cnn", "/", "dailymail", "(", "47", ".", "78", "rouge", "-", "1", ")", "datasets"], "offsets": [117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 138]}, {"text": "xsum ( 49 . 07 rouge - 1 ) datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["xsum", "(", "49", ".", "07", "rouge", "-", "1", ")", "datasets"], "offsets": [129, 130, 131, 132, 133, 134, 135, 136, 137, 138]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [104]}}, {"event_type": "FAC", "arguments": [{"text": "our model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["our", "model"], "offsets": [145, 146]}, {"text": "probabilities of candidate summaries", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["probabilities", "of", "candidate", "summaries"], "offsets": [149, 150, 151, 152]}], "trigger": {"text": "estimate", "tokens": ["estimate"], "offsets": [148]}}], "document": ["abstractive", "summarization", "models", "are", "commonly", "trained", "using", "maximum", "likelihood", "estimation", ",", "which", "assumes", "a", "deterministic", "(", "one", "-", "point", ")", "target", "distribution", "in", "which", "an", "ideal", "model", "will", "assign", "all", "the", "probability", "mass", "to", "the", "reference", "summary", ".", "this", "assumption", "may", "lead", "to", "performance", "degradation", "during", "inference", ",", "where", "the", "model", "needs", "to", "compare", "several", "system", "-", "generated", "(", "candidate", ")", "summaries", "that", "have", "deviated", "from", "the", "reference", "summary", ".", "to", "address", "this", "problem", ",", "we", "propose", "a", "novel", "training", "paradigm", "which", "assumes", "a", "non", "-", "deterministic", "distribution", "so", "that", "different", "candidate", "summaries", "are", "assigned", "probability", "mass", "according", "to", "their", "quality", ".", "our", "method", "achieves", 
"a", "new", "state", "-", "of", "-", "the", "-", "art", "result", "on", "the", "cnn", "/", "dailymail", "(", "47", ".", "78", "rouge", "-", "1", ")", "and", "xsum", "(", "49", ".", "07", "rouge", "-", "1", ")", "datasets", ".", "further", "analysis", "also", "shows", "that", "our", "model", "can", "estimate", "probabilities", "of", "candidate", "summaries", "that", "are", "more", "correlated", "with", "their", "level", "of", "quality", "."]}, {"venue": "ACL", "title": "Conditional Generation of Temporally-ordered Event Sequences", "abstract": "Models of narrative schema knowledge have proven useful for a range of event-related tasks, but they typically do not capture the temporal relationships between events. We propose a single model that addresses both temporal ordering, sorting given events into the order they occurred, and event infilling, predicting new events which fit into an existing temporally-ordered sequence. We use a BART-based conditional generation model that can capture both temporality and common event co-occurrence, meaning it can be flexibly applied to different tasks in this space. Our model is trained as a denoising autoencoder: we take temporally-ordered event sequences, shuffle them, delete some events, and then attempt to recover the original event sequence. This task teaches the model to make inferences given incomplete knowledge about the events in an underlying scenario. On the temporal ordering task, we show that our model is able to unscramble event sequences from existing datasets without access to explicitly labeled temporal training data, outperforming both a BERT-based pairwise model and a BERT-based pointer network. 
On event infilling, human evaluation shows that our model is able to generate events that fit better temporally into the input events when compared to GPT-2 story completion models.", "doc_id": "f7bac977fdf2a33e399f4f90d3c6b2bc", "publication_year": 2021, "sentences": ["models of narrative schema knowledge have proven useful for a range of event - related tasks , but they typically do not capture the temporal relationships between events .", "we propose a single model that addresses both temporal ordering , sorting given events into the order they occurred , and event infilling , predicting new events which fit into an existing temporally - ordered sequence .", "we use a bart - based conditional generation model that can capture both temporality and common event co - occurrence , meaning it can be flexibly applied to different tasks in this space .", "our model is trained as a denoising autoencoder : we take temporally - ordered event sequences , shuffle them , delete some events , and then attempt to recover the original event sequence .", "this task teaches the model to make inferences given incomplete knowledge about the events in an underlying scenario .", "on the temporal ordering task , we show that our model is able to unscramble event sequences from existing datasets without access to explicitly labeled temporal training data , outperforming both a bert - based pairwise model and a bert - based pointer network .", "on event infilling , human evaluation shows that our model is able to generate events that fit better temporally into the input events when compared to gpt - 2 story completion models ."], "events": [{"event_type": "RWF", "arguments": [{"text": "models of narrative schema knowledge", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["models", "of", "narrative", "schema", "knowledge"], "offsets": [0, 1, 2, 3, 4]}, {"text": "not capture", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "capture"], "offsets": [21, 22]}], 
"trigger": {"text": "not capture", "tokens": ["not", "capture"], "offsets": [21, 22]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [29]}, {"text": "single model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["single", "model"], "offsets": [32, 33]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [30]}}, {"event_type": "MDS", "arguments": [{"text": "bart - based conditional generation model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["bart", "-", "based", "conditional", "generation", "model"], "offsets": [69, 70, 71, 72, 73, 74]}, {"text": "denoising autoencoder", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["denoising", "autoencoder"], "offsets": [106, 107]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [103]}}, {"event_type": "MDS", "arguments": [{"text": "this task teaches the model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["this", "task", "teaches", "the", "model"], "offsets": [134, 135, 136, 137, 138]}, {"text": "inferences", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["inferences"], "offsets": [141]}, {"text": "given incomplete knowledge about the events in an underlying scenario", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["given", "incomplete", "knowledge", "about", "the", "events", "in", "an", "underlying", "scenario"], "offsets": [142, 143, 144, 145, 146, 147, 148, 149, 150, 151]}], "trigger": {"text": "make", "tokens": ["make"], "offsets": [140]}}, {"event_type": "FIN", "arguments": [{"text": "unscramble", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["unscramble"], "offsets": [167]}, {"text": "outperforming", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforming"], "offsets": [182]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [160]}}, 
{"event_type": "FAC", "arguments": [{"text": "on the temporal ordering task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "temporal", "ordering", "task"], "offsets": [153, 154, 155, 156, 157]}, {"text": "single model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["single", "model"], "offsets": [32, 33]}, {"text": "event sequences", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["event", "sequences"], "offsets": [168, 169]}, {"text": "existing datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["existing", "datasets"], "offsets": [171, 172]}, {"text": "without access to explicitly labeled temporal training data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "access", "to", "explicitly", "labeled", "temporal", "training", "data"], "offsets": [173, 174, 175, 176, 177, 178, 179, 180]}], "trigger": {"text": "unscramble", "tokens": ["unscramble"], "offsets": [167]}}, {"event_type": "CMP", "arguments": [{"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [182]}, {"text": "bert - based pointer network", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["bert", "-", "based", "pointer", "network"], "offsets": [192, 193, 194, 195, 196]}, {"text": "single model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["single", "model"], "offsets": [32, 33]}, {"text": "on the temporal ordering task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "temporal", "ordering", "task"], "offsets": [153, 154, 155, 156, 157]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [182]}}, {"event_type": "FIN", "arguments": [{"text": "generate", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["generate"], "offsets": [211]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [204]}}, {"event_type": "CMP", "arguments": [{"text": "on 
event infilling", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "event", "infilling"], "offsets": [198, 199, 200]}, {"text": "gpt - 2 story completion models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["gpt", "-", "2", "story", "completion", "models"], "offsets": [224, 225, 226, 227, 228, 229]}, {"text": "single model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["single", "model"], "offsets": [32, 33]}, {"text": "events", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["events"], "offsets": [212]}, {"text": "better temporally into the input events", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "temporally", "into", "the", "input", "events"], "offsets": [215, 216, 217, 218, 219, 220]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [211]}}, {"event_type": "MDS", "arguments": [], "trigger": {"text": "sorting", "tokens": ["sorting"], "offsets": [40]}}, {"event_type": "PUR", "arguments": [], "trigger": {"text": "addresses", "tokens": ["addresses"], "offsets": [35]}}, {"event_type": "MDS", "arguments": [{"text": "new events", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["new", "events"], "offsets": [54, 55]}, {"text": "addresses", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["addresses"], "offsets": [35]}], "trigger": {"text": "predicting", "tokens": ["predicting"], "offsets": [53]}}, {"event_type": "PUR", "arguments": [{"text": "event infilling", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["event", "infilling"], "offsets": [50, 51]}], "trigger": {"text": "addresses", "tokens": ["addresses"], "offsets": [35]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [66]}, {"text": "bart - based conditional generation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bart", "-", "based", "conditional", 
"generation", "model"], "offsets": [69, 70, 71, 72, 73, 74]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "temporally - ordered event sequences", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["temporally", "-", "ordered", "event", "sequences"], "offsets": [111, 112, 113, 114, 115]}], "trigger": {"text": "take", "tokens": ["take"], "offsets": [110]}}, {"event_type": "MDS", "arguments": [{"text": "temporally - ordered event sequences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["temporally", "-", "ordered", "event", "sequences"], "offsets": [111, 112, 113, 114, 115]}], "trigger": {"text": "shuffle", "tokens": ["shuffle"], "offsets": [117]}}, {"event_type": "MDS", "arguments": [{"text": "some events", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["some", "events"], "offsets": [121, 122]}], "trigger": {"text": "delete", "tokens": ["delete"], "offsets": [120]}}, {"event_type": "MDS", "arguments": [{"text": "original event sequence", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["original", "event", "sequence"], "offsets": [130, 131, 132]}], "trigger": {"text": "recover", "tokens": ["recover"], "offsets": [128]}}], "document": ["models", "of", "narrative", "schema", "knowledge", "have", "proven", "useful", "for", "a", "range", "of", "event", "-", "related", "tasks", ",", "but", "they", "typically", "do", "not", "capture", "the", "temporal", "relationships", "between", "events", ".", "we", "propose", "a", "single", "model", "that", "addresses", "both", "temporal", "ordering", ",", "sorting", "given", "events", "into", "the", "order", "they", "occurred", ",", "and", "event", "infilling", ",", "predicting", "new", "events", "which", "fit", "into", "an", "existing", "temporally", "-", "ordered", "sequence", ".", "we", "use", "a", "bart", "-", "based", "conditional", "generation", "model", "that", "can", "capture", "both", 
"temporality", "and", "common", "event", "co", "-", "occurrence", ",", "meaning", "it", "can", "be", "flexibly", "applied", "to", "different", "tasks", "in", "this", "space", ".", "our", "model", "is", "trained", "as", "a", "denoising", "autoencoder", ":", "we", "take", "temporally", "-", "ordered", "event", "sequences", ",", "shuffle", "them", ",", "delete", "some", "events", ",", "and", "then", "attempt", "to", "recover", "the", "original", "event", "sequence", ".", "this", "task", "teaches", "the", "model", "to", "make", "inferences", "given", "incomplete", "knowledge", "about", "the", "events", "in", "an", "underlying", "scenario", ".", "on", "the", "temporal", "ordering", "task", ",", "we", "show", "that", "our", "model", "is", "able", "to", "unscramble", "event", "sequences", "from", "existing", "datasets", "without", "access", "to", "explicitly", "labeled", "temporal", "training", "data", ",", "outperforming", "both", "a", "bert", "-", "based", "pairwise", "model", "and", "a", "bert", "-", "based", "pointer", "network", ".", "on", "event", "infilling", ",", "human", "evaluation", "shows", "that", "our", "model", "is", "able", "to", "generate", "events", "that", "fit", "better", "temporally", "into", "the", "input", "events", "when", "compared", "to", "gpt", "-", "2", "story", "completion", "models", "."]}, {"venue": "ACL", "title": "Lite Unified Modeling for Discriminative Reading Comprehension", "abstract": "As a broad and major category in machine reading comprehension (MRC), the generalized goal of discriminative MRC is answer prediction from the given materials. However, the focuses of various discriminative MRC tasks may be diverse enough: multi-choice MRC requires model to highlight and integrate all potential critical evidence globally; while extractive MRC focuses on higher local boundary preciseness for answer extraction. Among previous works, there lacks a unified design with pertinence for the overall discriminative MRC tasks. 
To fill in above gap, we propose a lightweight POS-Enhanced Iterative Co-Attention Network (POI-Net) as the first attempt of unified modeling with pertinence, to handle diverse discriminative MRC tasks synchronously. Nearly without introducing more parameters, our lite unified design brings model significant improvement with both encoder and decoder components. The evaluation results on four discriminative MRC benchmarks consistently indicate the general effectiveness and applicability of our model, and the code is available at https://github.com/Yilin1111/poi-net.", "doc_id": "4d6e62ae20f1b95678e43de4162d44f6", "publication_year": 2022, "sentences": ["as a broad and major category in machine reading comprehension ( mrc ) , the generalized goal of discriminative mrc is answer prediction from the given materials .", "however , the focuses of various discriminative mrc tasks may be diverse enough : multi - choice mrc requires model to highlight and integrate all potential critical evidence globally ; while extractive mrc focuses on higher local boundary preciseness for answer extraction .", "among previous works , there lacks a unified design with pertinence for the overall discriminative mrc tasks .", "to fill in above gap , we propose a lightweight pos - enhanced iterative co - attention network ( poi - net ) as the first attempt of unified modeling with pertinence , to handle diverse discriminative mrc tasks synchronously .", "nearly without introducing more parameters , our lite unified design brings model significant improvement with both encoder and decoder components .", "the evaluation results on four discriminative mrc benchmarks consistently indicate the general effectiveness and applicability of our model , and the code is available at https : / / github . 
com / yilin1111 / poi - net ."], "events": [{"event_type": "ITT", "arguments": [{"text": "machine reading comprehension", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["machine", "reading", "comprehension"], "offsets": [7, 8, 9]}], "trigger": {"text": "category", "tokens": ["category"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "overall discriminative mrc tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["overall", "discriminative", "machine", "reading", "comprehension", "tasks"], "offsets": [84, 85, 7, 8, 9, 87]}, {"text": "previous works", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "works"], "offsets": [72, 73]}, {"text": "lacks", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lacks"], "offsets": [76]}], "trigger": {"text": "lacks", "tokens": ["lacks"], "offsets": [76]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [95]}, {"text": "pos - enhanced iterative co - attention network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pos", "-", "enhanced", "iterative", "co", "-", "attention", "network"], "offsets": [99, 100, 101, 102, 103, 104, 105, 106]}, {"text": "handle", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["handle"], "offsets": [123]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [96]}}, {"event_type": "PUR", "arguments": [{"text": "discriminative mrc tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["discriminative", "machine", "reading", "comprehension", "tasks"], "offsets": [125, 7, 8, 9, 127]}], "trigger": {"text": "handle", "tokens": ["handle"], "offsets": [123]}}, {"event_type": "FAC", "arguments": [{"text": "nearly without introducing more parameters", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["nearly", "without", "introducing", "more", "parameters"], "offsets": [130, 131, 132, 133, 
134]}, {"text": "pos - enhanced iterative co - attention network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pos", "-", "enhanced", "iterative", "co", "-", "attention", "network"], "offsets": [99, 100, 101, 102, 103, 104, 105, 106]}, {"text": "significant improvement", "nugget_type": "STR", "argument_type": "Object", "tokens": ["significant", "improvement"], "offsets": [142, 143]}, {"text": "with both encoder components", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "both", "encoder", "components"], "offsets": [144, 145, 146, 149]}, {"text": "decoder components", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["decoder", "components"], "offsets": [148, 149]}], "trigger": {"text": "brings", "tokens": ["brings"], "offsets": [140]}}, {"event_type": "FAC", "arguments": [{"text": "on four discriminative mrc benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "four", "discriminative", "machine", "reading", "comprehension", "benchmarks"], "offsets": [154, 155, 156, 7, 8, 9, 158]}, {"text": "general effectiveness", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["general", "effectiveness"], "offsets": [162, 163]}, {"text": "general applicability", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["general", "applicability"], "offsets": [162, 165]}], "trigger": {"text": "indicate", "tokens": ["indicate"], "offsets": [160]}}], "document": ["as", "a", "broad", "and", "major", "category", "in", "machine", "reading", "comprehension", "(", "mrc", ")", ",", "the", "generalized", "goal", "of", "discriminative", "mrc", "is", "answer", "prediction", "from", "the", "given", "materials", ".", "however", ",", "the", "focuses", "of", "various", "discriminative", "mrc", "tasks", "may", "be", "diverse", "enough", ":", "multi", "-", "choice", "mrc", "requires", "model", "to", "highlight", "and", "integrate", "all", "potential", "critical", "evidence", "globally", ";", "while", 
"extractive", "mrc", "focuses", "on", "higher", "local", "boundary", "preciseness", "for", "answer", "extraction", ".", "among", "previous", "works", ",", "there", "lacks", "a", "unified", "design", "with", "pertinence", "for", "the", "overall", "discriminative", "mrc", "tasks", ".", "to", "fill", "in", "above", "gap", ",", "we", "propose", "a", "lightweight", "pos", "-", "enhanced", "iterative", "co", "-", "attention", "network", "(", "poi", "-", "net", ")", "as", "the", "first", "attempt", "of", "unified", "modeling", "with", "pertinence", ",", "to", "handle", "diverse", "discriminative", "mrc", "tasks", "synchronously", ".", "nearly", "without", "introducing", "more", "parameters", ",", "our", "lite", "unified", "design", "brings", "model", "significant", "improvement", "with", "both", "encoder", "and", "decoder", "components", ".", "the", "evaluation", "results", "on", "four", "discriminative", "mrc", "benchmarks", "consistently", "indicate", "the", "general", "effectiveness", "and", "applicability", "of", "our", "model", ",", "and", "the", "code", "is", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "yilin1111", "/", "poi", "-", "net", "."]}, {"venue": "ACL", "title": "On the Spontaneous Emergence of Discrete and Compositional Signals", "abstract": "We propose a general framework to study language emergence through signaling games with neural agents. Using a continuous latent space, we are able to (i) train using backpropagation, (ii) show that discrete messages nonetheless naturally emerge. 
We explore whether categorical perception effects follow and show that the messages are not compositional.", "doc_id": "a1405739c41b525f9fd26a7a69d505e7", "publication_year": 2020, "sentences": ["we propose a general framework to study language emergence through signaling games with neural agents .", "using a continuous latent space , we are able to ( i ) train using backpropagation , ( ii ) show that discrete messages nonetheless naturally emerge .", "we explore whether categorical perception effects follow and show that the messages are not compositional ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "general framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["general", "framework"], "offsets": [3, 4]}, {"text": "study", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["study"], "offsets": [6]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "language emergence", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["language", "emergence"], "offsets": [7, 8]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [6]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [44]}, {"text": "categorical perception effects", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["categorical", "perception", "effects"], "offsets": [47, 48, 49]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [45]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [22]}, {"text": "backpropagation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["backpropagation"], "offsets": [31]}, {"text": "train", "nugget_type": "TAK", "argument_type": "Target", "tokens": 
["train"], "offsets": [29]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [30]}}, {"event_type": "FAC", "arguments": [{"text": "discrete messages", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["discrete", "messages"], "offsets": [38, 39]}, {"text": "emerge", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["emerge"], "offsets": [42]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [36]}}, {"event_type": "MDS", "arguments": [{"text": "games", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["games"], "offsets": [11]}, {"text": "neural agents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["neural", "agents"], "offsets": [13, 14]}], "trigger": {"text": "signaling", "tokens": ["signaling"], "offsets": [10]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [44]}, {"text": "not compositional", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["not", "compositional"], "offsets": [57, 58]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [52]}}, {"event_type": "FAC", "arguments": [{"text": "messages", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["messages"], "offsets": [55]}], "trigger": {"text": "not compositional", "tokens": ["not", "compositional"], "offsets": [57, 58]}}, {"event_type": "WKS", "arguments": [{"text": "using a continuous latent space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "a", "continuous", "latent", "space"], "offsets": [16, 17, 18, 19, 20]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [22]}, {"text": "backpropagation", "nugget_type": "APP", "argument_type": "Content", "tokens": ["backpropagation"], "offsets": [31]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [30]}}, {"event_type": "WKS", "arguments": [{"text": "using a continuous 
latent space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "a", "continuous", "latent", "space"], "offsets": [16, 17, 18, 19, 20]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [22]}, {"text": "discrete messages nonetheless naturally emerge", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["discrete", "messages", "nonetheless", "naturally", "emerge"], "offsets": [38, 39, 40, 41, 42]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [36]}}], "document": ["we", "propose", "a", "general", "framework", "to", "study", "language", "emergence", "through", "signaling", "games", "with", "neural", "agents", ".", "using", "a", "continuous", "latent", "space", ",", "we", "are", "able", "to", "(", "i", ")", "train", "using", "backpropagation", ",", "(", "ii", ")", "show", "that", "discrete", "messages", "nonetheless", "naturally", "emerge", ".", "we", "explore", "whether", "categorical", "perception", "effects", "follow", "and", "show", "that", "the", "messages", "are", "not", "compositional", "."]}, {"venue": "ACL", "title": "Catchphrase: Automatic Detection of Cultural References", "abstract": "A snowclone is a customizable phrasal template that can be realized in multiple, instantly recognized variants. For example, \u201c* is the new *\" (Orange is the new black, 40 is the new 30). Snowclones are extensively used in social media. In this paper, we study snowclones originating from pop-culture quotes; our goal is to automatically detect cultural references in text. We introduce a new, publicly available data set of pop-culture quotes and their corresponding snowclone usages and train models on them. We publish code for Catchphrase, an internet browser plugin to automatically detect and mark references in real-time, and examine its performance via a user study. 
Aside from assisting people to better comprehend cultural references, we hope that detecting snowclones can complement work on paraphrasing and help tackling long-standing questions in social science about the dynamics of information propagation.", "doc_id": "97b251b62c72af39399ac922a73e5cee", "publication_year": 2021, "sentences": ["a snowclone is a customizable phrasal template that can be realized in multiple , instantly recognized variants .", "for example , \u201c * is the new * \" ( orange is the new black , 40 is the new 30 ) .", "snowclones are extensively used in social media .", "in this paper , we study snowclones originating from pop - culture quotes ; our goal is to automatically detect cultural references in text .", "we introduce a new , publicly available data set of pop - culture quotes and their corresponding snowclone usages and train models on them .", "we publish code for catchphrase , an internet browser plugin to automatically detect and mark references in real - time , and examine its performance via a user study .", "aside from assisting people to better comprehend cultural references , we hope that detecting snowclones can complement work on paraphrasing and help tackling long - standing questions in social science about the dynamics of information propagation ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [75]}, {"text": "publicly available data set", "nugget_type": "DST", "argument_type": "Content", "tokens": ["publicly", "available", "data", "set"], "offsets": [80, 81, 82, 83]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [76]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [100]}, {"text": "code for catchphrase", "nugget_type": "APP", "argument_type": "Content", "tokens": ["code", "for", "catchphrase"], "offsets": [102, 103, 
104]}, {"text": "mark", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["mark"], "offsets": [114]}, {"text": "examine", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["examine"], "offsets": [122]}], "trigger": {"text": "publish", "tokens": ["publish"], "offsets": [101]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [54]}, {"text": "automatically detect", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["automatically", "detect"], "offsets": [68, 69]}, {"text": "snowclones originating", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["snowclones", "originating"], "offsets": [56, 57]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [55]}}, {"event_type": "PUR", "arguments": [{"text": "cultural references in text", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["cultural", "references", "in", "text"], "offsets": [70, 71, 72, 73]}], "trigger": {"text": "automatically detect", "tokens": ["automatically", "detect"], "offsets": [68, 69]}}, {"event_type": "PUR", "arguments": [{"text": "references", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["references"], "offsets": [115]}, {"text": "in real - time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "real", "-", "time"], "offsets": [116, 117, 118, 119]}], "trigger": {"text": "mark", "tokens": ["mark"], "offsets": [114]}}, {"event_type": "PUR", "arguments": [{"text": "its performance via a user study", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["its", "performance", "via", "a", "user", "study"], "offsets": [123, 124, 125, 126, 127, 128]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": [122]}}], "document": ["a", "snowclone", "is", "a", "customizable", "phrasal", "template", "that", "can", "be", "realized", "in", "multiple", ",", "instantly", "recognized", "variants", ".", "for", "example", ",", 
"\u201c", "*", "is", "the", "new", "*", "\"", "(", "orange", "is", "the", "new", "black", ",", "40", "is", "the", "new", "30", ")", ".", "snowclones", "are", "extensively", "used", "in", "social", "media", ".", "in", "this", "paper", ",", "we", "study", "snowclones", "originating", "from", "pop", "-", "culture", "quotes", ";", "our", "goal", "is", "to", "automatically", "detect", "cultural", "references", "in", "text", ".", "we", "introduce", "a", "new", ",", "publicly", "available", "data", "set", "of", "pop", "-", "culture", "quotes", "and", "their", "corresponding", "snowclone", "usages", "and", "train", "models", "on", "them", ".", "we", "publish", "code", "for", "catchphrase", ",", "an", "internet", "browser", "plugin", "to", "automatically", "detect", "and", "mark", "references", "in", "real", "-", "time", ",", "and", "examine", "its", "performance", "via", "a", "user", "study", ".", "aside", "from", "assisting", "people", "to", "better", "comprehend", "cultural", "references", ",", "we", "hope", "that", "detecting", "snowclones", "can", "complement", "work", "on", "paraphrasing", "and", "help", "tackling", "long", "-", "standing", "questions", "in", "social", "science", "about", "the", "dynamics", "of", "information", "propagation", "."]}, {"venue": "ACL", "title": "Improving Event Detection via Open-domain Trigger Knowledge", "abstract": "Event Detection (ED) is a fundamental task in automatically structuring texts. Due to the small scale of training data, previous methods perform poorly on unseen/sparsely labeled trigger words and are prone to overfitting densely labeled trigger words. To address the issue, we propose a novel Enrichment Knowledge Distillation (EKD) model to leverage external open-domain trigger knowledge to reduce the in-built biases to frequent trigger words in annotations. Experiments on benchmark ACE2005 show that our model outperforms nine strong baselines, is especially effective for unseen/sparsely labeled trigger words. 
The source code is released on https://github.com/shuaiwa16/ekd.git.", "doc_id": "a5b801f5817cc55afaceedc9fe4f9087", "publication_year": 2020, "sentences": ["event detection ( ed ) is a fundamental task in automatically structuring texts .", "due to the small scale of training data , previous methods perform poorly on unseen / sparsely labeled trigger words and are prone to overfitting densely labeled trigger words .", "to address the issue , we propose a novel enrichment knowledge distillation ( ekd ) model to leverage external open - domain trigger knowledge to reduce the in - built biases to frequent trigger words in annotations .", "experiments on benchmark ace2005 show that our model outperforms nine strong baselines , is especially effective for unseen / sparsely labeled trigger words .", "the source code is released on https : / / github . com / shuaiwa16 / ekd . git ."], "events": [{"event_type": "ITT", "arguments": [{"text": "event detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["event", "detection"], "offsets": [0, 1]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "unseen / sparsely labeled trigger words", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["unseen", "/", "sparsely", "labeled", "trigger", "words"], "offsets": [28, 29, 30, 31, 32, 33]}, {"text": "poorly", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["poorly"], "offsets": [26]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [25]}}, {"event_type": "RWF", "arguments": [{"text": "overfitting", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["overfitting"], "offsets": [38]}], "trigger": {"text": "overfitting", "tokens": ["overfitting"], "offsets": [38]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [49]}, {"text": "novel enrichment knowledge distillation ( ekd ) 
model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["novel", "enrichment", "knowledge", "distillation", "model"], "offsets": [52, 53, 54, 55, 59]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [50]}}, {"event_type": "MDS", "arguments": [{"text": "in - built biases", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["in", "-", "built", "biases"], "offsets": [71, 72, 73, 74]}, {"text": "external open - domain trigger knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["external", "open", "-", "domain", "trigger", "knowledge"], "offsets": [62, 63, 64, 65, 66, 67]}], "trigger": {"text": "reduce", "tokens": ["reduce"], "offsets": [69]}}, {"event_type": "CMP", "arguments": [{"text": "on benchmark ace2005", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "benchmark", "ace2005"], "offsets": [83, 84, 85]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [90]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [90]}}, {"event_type": "FAC", "arguments": [{"text": "unseen / sparsely labeled trigger words", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["unseen", "/", "sparsely", "labeled", "trigger", "words"], "offsets": [99, 100, 101, 102, 103, 104]}, {"text": "enrichment knowledge distillation ( ekd ) model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["enrichment", "knowledge", "distillation", "model"], "offsets": [53, 54, 55, 59]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [97]}}], "document": ["event", "detection", "(", "ed", ")", "is", "a", "fundamental", "task", "in", "automatically", "structuring", "texts", ".", "due", "to", "the", "small", "scale", "of", "training", "data", ",", "previous", "methods", "perform", "poorly", "on", "unseen", "/", "sparsely", "labeled", "trigger", "words", "and", "are", "prone", "to", 
"overfitting", "densely", "labeled", "trigger", "words", ".", "to", "address", "the", "issue", ",", "we", "propose", "a", "novel", "enrichment", "knowledge", "distillation", "(", "ekd", ")", "model", "to", "leverage", "external", "open", "-", "domain", "trigger", "knowledge", "to", "reduce", "the", "in", "-", "built", "biases", "to", "frequent", "trigger", "words", "in", "annotations", ".", "experiments", "on", "benchmark", "ace2005", "show", "that", "our", "model", "outperforms", "nine", "strong", "baselines", ",", "is", "especially", "effective", "for", "unseen", "/", "sparsely", "labeled", "trigger", "words", ".", "the", "source", "code", "is", "released", "on", "https", ":", "/", "/", "github", ".", "com", "/", "shuaiwa16", "/", "ekd", ".", "git", "."]}, {"venue": "ACL", "title": "Multilingual Unsupervised NMT using Shared Encoder and Language-Specific Decoders", "abstract": "In this paper, we propose a multilingual unsupervised NMT scheme which jointly trains multiple languages with a shared encoder and multiple decoders. Our approach is based on denoising autoencoding of each language and back-translating between English and multiple non-English languages. This results in a universal encoder which can encode any language participating in training into an inter-lingual representation, and language-specific decoders. Our experiments using only monolingual corpora show that multilingual unsupervised model performs better than the separately trained bilingual models achieving improvement of up to 1.48 BLEU points on WMT test sets. 
We also observe that even if we do not train the network for all possible translation directions, the network is still able to translate in a many-to-many fashion leveraging encoder\u2019s ability to generate interlingual representation.", "doc_id": "16078f2869ec108fecfe2e088ed0a81e", "publication_year": 2019, "sentences": ["in this paper , we propose a multilingual unsupervised nmt scheme which jointly trains multiple languages with a shared encoder and multiple decoders .", "our approach is based on denoising autoencoding of each language and back - translating between english and multiple non - english languages .", "this results in a universal encoder which can encode any language participating in training into an inter - lingual representation , and language - specific decoders .", "our experiments using only monolingual corpora show that multilingual unsupervised model performs better than the separately trained bilingual models achieving improvement of up to 1 . 48 bleu points on wmt test sets .", "we also observe that even if we do not train the network for all possible translation directions , the network is still able to translate in a many - to - many fashion leveraging encoder \u2019 s ability to generate interlingual representation ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [4]}, {"text": "multilingual unsupervised nmt scheme", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multilingual", "unsupervised", "nmt", "scheme"], "offsets": [7, 8, 9, 10]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [5]}}, {"event_type": "MDS", "arguments": [{"text": "multiple languages", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["multiple", "languages"], "offsets": [14, 15]}, {"text": "shared encoder", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["shared", "encoder"], "offsets": [18, 19]}, 
{"text": "multiple decoders", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["multiple", "decoders"], "offsets": [21, 22]}], "trigger": {"text": "jointly trains", "tokens": ["jointly", "trains"], "offsets": [12, 13]}}, {"event_type": "WKS", "arguments": [{"text": "autoencoding of each language", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["autoencoding", "of", "each", "language"], "offsets": [30, 31, 32, 33]}, {"text": "back - translating between english and multiple non - english languages", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["back", "-", "translating", "between", "english", "and", "multiple", "non", "-", "english", "languages"], "offsets": [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45]}], "trigger": {"text": "denoising", "tokens": ["denoising"], "offsets": [29]}}, {"event_type": "FIN", "arguments": [{"text": "performs", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["performs"], "offsets": [85]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [80]}}, {"event_type": "CMP", "arguments": [{"text": "multilingual unsupervised model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["multilingual", "unsupervised", "model"], "offsets": [82, 83, 84]}, {"text": "separately trained bilingual models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["separately", "trained", "bilingual", "models"], "offsets": [89, 90, 91, 92]}, {"text": "up to 1 . 
48", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["up", "to", "1", ".", "48"], "offsets": [96, 97, 98, 99, 100]}, {"text": "bleu points", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bleu", "points"], "offsets": [101, 102]}, {"text": "wmt test sets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["wmt", "test", "sets"], "offsets": [104, 105, 106]}, {"text": "improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvement"], "offsets": [94]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [86]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [85]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [108]}, {"text": "leveraging", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["leveraging"], "offsets": [141]}], "trigger": {"text": "observe", "tokens": ["observe"], "offsets": [110]}}, {"event_type": "FAC", "arguments": [{"text": "even if we do not train the network for all possible translation directions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["even", "if", "we", "do", "not", "train", "the", "network", "for", "all", "possible", "translation", "directions"], "offsets": [112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124]}, {"text": "network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["network"], "offsets": [127]}, {"text": "encoder \u2019 s ability", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["encoder", "\u2019", "s", "ability"], "offsets": [142, 143, 144, 145]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [147]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [141]}}, {"event_type": "PUR", "arguments": [{"text": "interlingual representation", "nugget_type": "TAK", 
"argument_type": "Aim", "tokens": ["interlingual", "representation"], "offsets": [148, 149]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [147]}}, {"event_type": "FAC", "arguments": [{"text": "even if we do not train the network for all possible translation directions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["even", "if", "we", "do", "not", "train", "the", "network", "for", "all", "possible", "translation", "directions"], "offsets": [112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124]}, {"text": "network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["network"], "offsets": [127]}, {"text": "in a many - to - many fashion", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "many", "-", "to", "-", "many", "fashion"], "offsets": [133, 134, 135, 136, 137, 138, 139, 140]}], "trigger": {"text": "translate", "tokens": ["translate"], "offsets": [132]}}], "document": ["in", "this", "paper", ",", "we", "propose", "a", "multilingual", "unsupervised", "nmt", "scheme", "which", "jointly", "trains", "multiple", "languages", "with", "a", "shared", "encoder", "and", "multiple", "decoders", ".", "our", "approach", "is", "based", "on", "denoising", "autoencoding", "of", "each", "language", "and", "back", "-", "translating", "between", "english", "and", "multiple", "non", "-", "english", "languages", ".", "this", "results", "in", "a", "universal", "encoder", "which", "can", "encode", "any", "language", "participating", "in", "training", "into", "an", "inter", "-", "lingual", "representation", ",", "and", "language", "-", "specific", "decoders", ".", "our", "experiments", "using", "only", "monolingual", "corpora", "show", "that", "multilingual", "unsupervised", "model", "performs", "better", "than", "the", "separately", "trained", "bilingual", "models", "achieving", "improvement", "of", "up", "to", "1", ".", "48", "bleu", "points", "on", "wmt", "test", "sets", ".", "we", "also", 
"observe", "that", "even", "if", "we", "do", "not", "train", "the", "network", "for", "all", "possible", "translation", "directions", ",", "the", "network", "is", "still", "able", "to", "translate", "in", "a", "many", "-", "to", "-", "many", "fashion", "leveraging", "encoder", "\u2019", "s", "ability", "to", "generate", "interlingual", "representation", "."]}, {"venue": "ACL", "title": "Depth Growing for Neural Machine Translation", "abstract": "While very deep neural networks have shown effectiveness for computer vision and text classification applications, how to increase the network depth of the neural machine translation (NMT) models for better translation quality remains a challenging problem. Directly stacking more blocks to the NMT model results in no improvement and even drop in performance. In this work, we propose an effective two-stage approach with three specially designed components to construct deeper NMT models, which result in significant improvements over the strong Transformer baselines on WMT14 English\u2192German and English\u2192French translation tasks.", "doc_id": "ca4d1e1d5277bc2c78f2d8be48b56398", "publication_year": 2019, "sentences": ["while very deep neural networks have shown effectiveness for computer vision and text classification applications , how to increase the network depth of the neural machine translation ( nmt ) models for better translation quality remains a challenging problem .", "directly stacking more blocks to the nmt model results in no improvement and even drop in performance .", "in this work , we propose an effective two - stage approach with three specially designed components to construct deeper nmt models , which result in significant improvements over the strong transformer baselines on wmt14 english\u2192german and english\u2192french translation tasks ."], "events": [{"event_type": "ITT", "arguments": [{"text": "neural networks", "nugget_type": "APP", "argument_type": "Target", "tokens": ["neural", "networks"], 
"offsets": [3, 4]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [6]}}, {"event_type": "RWS", "arguments": [{"text": "more blocks", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["more", "blocks"], "offsets": [42, 43]}, {"text": "nmt model", "nugget_type": "APP", "argument_type": "Target", "tokens": ["nmt", "model"], "offsets": [46, 47]}], "trigger": {"text": "directly stacking", "tokens": ["directly", "stacking"], "offsets": [40, 41]}}, {"event_type": "RWF", "arguments": [{"text": "no improvement", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["no", "improvement"], "offsets": [50, 51]}, {"text": "even drop", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["even", "drop"], "offsets": [53, 54]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance"], "offsets": [56]}], "trigger": {"text": "results", "tokens": ["results"], "offsets": [48]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [62]}, {"text": "effective two - stage approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["effective", "two", "-", "stage", "approach"], "offsets": [65, 66, 67, 68, 69]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [63]}}, {"event_type": "MDS", "arguments": [{"text": "three specially designed components", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["three", "specially", "designed", "components"], "offsets": [71, 72, 73, 74]}, {"text": "deeper nmt models", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["deeper", "neural", "machine", "translation", "models"], "offsets": [77, 24, 25, 26, 79]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [76]}}, {"event_type": "CMP", "arguments": [{"text": "significant", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significant"], "offsets": 
[84]}, {"text": "improvements", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["improvements"], "offsets": [85]}, {"text": "strong transformer baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "transformer", "baselines"], "offsets": [88, 89, 90]}, {"text": "on wmt14 english\u2192german and english\u2192french translation tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "wmt14", "english\u2192german", "and", "english\u2192french", "translation", "tasks"], "offsets": [91, 92, 93, 94, 95, 96, 97]}, {"text": "effective two - stage approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["effective", "two", "-", "stage", "approach"], "offsets": [65, 66, 67, 68, 69]}], "trigger": {"text": "result", "tokens": ["result"], "offsets": [82]}}], "document": ["while", "very", "deep", "neural", "networks", "have", "shown", "effectiveness", "for", "computer", "vision", "and", "text", "classification", "applications", ",", "how", "to", "increase", "the", "network", "depth", "of", "the", "neural", "machine", "translation", "(", "nmt", ")", "models", "for", "better", "translation", "quality", "remains", "a", "challenging", "problem", ".", "directly", "stacking", "more", "blocks", "to", "the", "nmt", "model", "results", "in", "no", "improvement", "and", "even", "drop", "in", "performance", ".", "in", "this", "work", ",", "we", "propose", "an", "effective", "two", "-", "stage", "approach", "with", "three", "specially", "designed", "components", "to", "construct", "deeper", "nmt", "models", ",", "which", "result", "in", "significant", "improvements", "over", "the", "strong", "transformer", "baselines", "on", "wmt14", "english\u2192german", "and", "english\u2192french", "translation", "tasks", "."]}, {"venue": "ACL", "title": "Code and Named Entity Recognition in StackOverflow", "abstract": "There is an increasing interest in studying natural language and computer code together, as large corpora of 
programming texts become readily available on the Internet. For example, StackOverflow currently has over 15 million programming related questions written by 8.5 million users. Meanwhile, there is still a lack of fundamental NLP techniques for identifying code tokens or software-related named entities that appear within natural language sentences. In this paper, we introduce a new named entity recognition (NER) corpus for the computer programming domain, consisting of 15,372 sentences annotated with 20 fine-grained entity types. We trained in-domain BERT representations (BERTOverflow) on 152 million sentences from StackOverflow, which lead to an absolute increase of +10 F1 score over off-the-shelf BERT. We also present the SoftNER model which achieves an overall 79.10 F-1 score for code and named entity recognition on StackOverflow data. Our SoftNER model incorporates a context-independent code token classifier with corpus-level features to improve the BERT-based tagging model. Our code and data are available at: https://github.com/jeniyat/StackOverflowNER/", "doc_id": "32602fec4b05d44cc0f2330266c8b4fc", "publication_year": 2020, "sentences": ["there is an increasing interest in studying natural language and computer code together , as large corpora of programming texts become readily available on the internet .", "for example , stackoverflow currently has over 15 million programming related questions written by 8 . 
5 million users .", "meanwhile , there is still a lack of fundamental nlp techniques for identifying code tokens or software - related named entities that appear within natural language sentences .", "in this paper , we introduce a new named entity recognition ( ner ) corpus for the computer programming domain , consisting of 15 , 372 sentences annotated with 20 fine - grained entity types .", "we trained in - domain bert representations ( bertoverflow ) on 152 million sentences from stackoverflow , which lead to an absolute increase of + 10 f1 score over off - the - shelf bert .", "we also present the softner model which achieves an overall 79 . 10 f - 1 score for code and named entity recognition on stackoverflow data .", "our softner model incorporates a context - independent code token classifier with corpus - level features to improve the bert - based tagging model .", "our code and data are available at : https : / / github . com / jeniyat / stackoverflowner /"], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["natural", "language"], "offsets": [7, 8]}, {"text": "computer code", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["computer", "code"], "offsets": [10, 11]}], "trigger": {"text": "studying", "tokens": ["studying"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "lack", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack"], "offsets": [53]}, {"text": "fundamental nlp techniques", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["fundamental", "nlp", "techniques"], "offsets": [55, 56, 57]}], "trigger": {"text": "lack", "tokens": ["lack"], "offsets": [53]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [79]}, {"text": "computer programming domain", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["computer", 
"programming", "domain"], "offsets": [92, 93, 94]}, {"text": "named entity recognition ( ner ) corpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["named", "entity", "recognition", "corpus"], "offsets": [83, 84, 85, 89]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [80]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [111]}, {"text": "152 million sentences", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["152", "million", "sentences"], "offsets": [122, 123, 124]}, {"text": "in - domain bert representations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "-", "domain", "bert", "representations"], "offsets": [113, 114, 115, 116, 117]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [112]}}, {"event_type": "CMP", "arguments": [{"text": "f1 score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "score"], "offsets": [137, 138]}, {"text": "off - the - shelf bert", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["off", "-", "the", "-", "shelf", "bert"], "offsets": [140, 141, 142, 143, 144, 145]}, {"text": "+ 10", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["+", "10"], "offsets": [135, 136]}, {"text": "absolute increase", "nugget_type": "STR", "argument_type": "Result", "tokens": ["absolute", "increase"], "offsets": [132, 133]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [129]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [147]}, {"text": "softner model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["softner", "model"], "offsets": [151, 152]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [149]}}, {"event_type": "FAC", "arguments": [{"text": "softner model", "nugget_type": "APP", "argument_type": "Subject", 
"tokens": ["softner", "model"], "offsets": [151, 152]}, {"text": "79 . 10", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["79", ".", "10"], "offsets": [157, 158, 159]}, {"text": "stackoverflow data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["stackoverflow", "data"], "offsets": [171, 172]}, {"text": "f - 1 score", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["f", "-", "1", "score"], "offsets": [160, 161, 162, 163]}, {"text": "code recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["code", "recognition"], "offsets": [165, 169]}, {"text": "named entity recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["named", "entity", "recognition"], "offsets": [167, 168, 169]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [154]}}, {"event_type": "PUR", "arguments": [{"text": "code tokens", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["code", "tokens"], "offsets": [60, 61]}, {"text": "software - related named entities", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["software", "-", "related", "named", "entities"], "offsets": [63, 64, 65, 66, 67]}], "trigger": {"text": "identifying", "tokens": ["identifying"], "offsets": [59]}}, {"event_type": "MDS", "arguments": [{"text": "context - independent code token classifier", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["context", "-", "independent", "code", "token", "classifier"], "offsets": [179, 180, 181, 182, 183, 184]}, {"text": "corpus - level features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["corpus", "-", "level", "features"], "offsets": [186, 187, 188, 189]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [191]}], "trigger": {"text": "incorporates", "tokens": ["incorporates"], "offsets": [177]}}, {"event_type": "PUR", "arguments": [{"text": "bert - based tagging model", "nugget_type": 
"APP", "argument_type": "Aim", "tokens": ["bert", "-", "based", "tagging", "model"], "offsets": [193, 194, 195, 196, 197]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [191]}}], "document": ["there", "is", "an", "increasing", "interest", "in", "studying", "natural", "language", "and", "computer", "code", "together", ",", "as", "large", "corpora", "of", "programming", "texts", "become", "readily", "available", "on", "the", "internet", ".", "for", "example", ",", "stackoverflow", "currently", "has", "over", "15", "million", "programming", "related", "questions", "written", "by", "8", ".", "5", "million", "users", ".", "meanwhile", ",", "there", "is", "still", "a", "lack", "of", "fundamental", "nlp", "techniques", "for", "identifying", "code", "tokens", "or", "software", "-", "related", "named", "entities", "that", "appear", "within", "natural", "language", "sentences", ".", "in", "this", "paper", ",", "we", "introduce", "a", "new", "named", "entity", "recognition", "(", "ner", ")", "corpus", "for", "the", "computer", "programming", "domain", ",", "consisting", "of", "15", ",", "372", "sentences", "annotated", "with", "20", "fine", "-", "grained", "entity", "types", ".", "we", "trained", "in", "-", "domain", "bert", "representations", "(", "bertoverflow", ")", "on", "152", "million", "sentences", "from", "stackoverflow", ",", "which", "lead", "to", "an", "absolute", "increase", "of", "+", "10", "f1", "score", "over", "off", "-", "the", "-", "shelf", "bert", ".", "we", "also", "present", "the", "softner", "model", "which", "achieves", "an", "overall", "79", ".", "10", "f", "-", "1", "score", "for", "code", "and", "named", "entity", "recognition", "on", "stackoverflow", "data", ".", "our", "softner", "model", "incorporates", "a", "context", "-", "independent", "code", "token", "classifier", "with", "corpus", "-", "level", "features", "to", "improve", "the", "bert", "-", "based", "tagging", "model", ".", "our", "code", "and", "data", "are", 
"available", "at", ":", "https", ":", "/", "/", "github", ".", "com", "/", "jeniyat", "/", "stackoverflowner", "/"]}, {"venue": "ACL", "title": "Improving Document Representations by Generating Pseudo Query Embeddings for Dense Retrieval", "abstract": "Recently, the retrieval models based on dense representations have been gradually applied in the first stage of the document retrieval tasks, showing better performance than traditional sparse vector space models. To obtain high efficiency, the basic structure of these models is Bi-encoder in most cases. However, this simple structure may cause serious information loss during the encoding of documents since the queries are agnostic. To address this problem, we design a method to mimic the queries to each of the documents by an iterative clustering process and represent the documents by multiple pseudo queries (i.e., the cluster centroids). To boost the retrieval process using approximate nearest neighbor search library, we also optimize the matching function with a two-step score calculation procedure. Experimental results on several popular ranking and QA datasets show that our model can achieve state-of-the-art results while still remaining high efficiency.", "doc_id": "3ffd04c2c5904b6cd8cc1aff7ee21299", "publication_year": 2021, "sentences": ["recently , the retrieval models based on dense representations have been gradually applied in the first stage of the document retrieval tasks , showing better performance than traditional sparse vector space models .", "to obtain high efficiency , the basic structure of these models is bi - encoder in most cases .", "however , this simple structure may cause serious information loss during the encoding of documents since the queries are agnostic .", "to address this problem , we design a method to mimic the queries to each of the documents by an iterative clustering process and represent the documents by multiple pseudo queries ( i . e . 
, the cluster centroids ) .", "to boost the retrieval process using approximate nearest neighbor search library , we also optimize the matching function with a two - step score calculation procedure .", "experimental results on several popular ranking and qa datasets show that our model can achieve state - of - the - art results while still remaining high efficiency ."], "events": [{"event_type": "ITT", "arguments": [{"text": "retrieval models based on dense representations", "nugget_type": "APP", "argument_type": "Target", "tokens": ["retrieval", "models", "based", "on", "dense", "representations"], "offsets": [3, 4, 5, 6, 7, 8]}], "trigger": {"text": "applied", "tokens": ["applied"], "offsets": [12]}}, {"event_type": "RWS", "arguments": [{"text": "obtain", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["obtain"], "offsets": [34]}, {"text": "basic structure of these models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["basic", "structure", "of", "retrieval", "models", "based", "on", "dense", "representations"], "offsets": [39, 40, 41, 3, 4, 5, 6, 7, 8]}, {"text": "bi - encoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["bi", "-", "encoder"], "offsets": [45, 46, 47]}, {"text": "in most cases", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "most", "cases"], "offsets": [48, 49, 50]}], "trigger": {"text": "is", "tokens": ["is"], "offsets": [44]}}, {"event_type": "PUR", "arguments": [{"text": "high efficiency", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["high", "efficiency"], "offsets": [35, 36]}], "trigger": {"text": "obtain", "tokens": ["obtain"], "offsets": [34]}}, {"event_type": "RWF", "arguments": [{"text": "simple structure", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["simple", "structure"], "offsets": [55, 56]}, {"text": "serious information loss", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["serious", "information", "loss"], "offsets": 
[59, 60, 61]}, {"text": "during the encoding of documents", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "the", "encoding", "of", "documents"], "offsets": [62, 63, 64, 65, 66]}], "trigger": {"text": "cause", "tokens": ["cause"], "offsets": [58]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [78]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [81]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [79]}}, {"event_type": "MDS", "arguments": [{"text": "iterative clustering process", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["iterative", "clustering", "process"], "offsets": [93, 94, 95]}, {"text": "queries", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["queries"], "offsets": [85]}, {"text": "each of the documents", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["each", "of", "the", "documents"], "offsets": [87, 88, 89, 90]}], "trigger": {"text": "mimic", "tokens": ["mimic"], "offsets": [83]}}, {"event_type": "MDS", "arguments": [{"text": "multiple pseudo queries", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["multiple", "pseudo", "queries"], "offsets": [101, 102, 103]}, {"text": "documents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["documents"], "offsets": [99]}], "trigger": {"text": "represent", "tokens": ["represent"], "offsets": [97]}}, {"event_type": "MDS", "arguments": [{"text": "matching function", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["matching", "function"], "offsets": [131, 132]}, {"text": "two - step score calculation procedure", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["two", "-", "step", "score", "calculation", "procedure"], "offsets": [135, 136, 137, 138, 139, 140]}], "trigger": {"text": "optimize", "tokens": 
["optimize"], "offsets": [129]}}, {"event_type": "MDS", "arguments": [{"text": "retrieval process", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["retrieval", "process"], "offsets": [118, 119]}, {"text": "approximate nearest neighbor search library", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["approximate", "nearest", "neighbor", "search", "library"], "offsets": [121, 122, 123, 124, 125]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [116]}}, {"event_type": "FIN", "arguments": [{"text": "achieve", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieve"], "offsets": [156]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [151]}}, {"event_type": "CMP", "arguments": [{"text": "on several popular ranking", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "several", "popular", "ranking"], "offsets": [144, 145, 146, 147]}, {"text": "qa datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["qa", "datasets"], "offsets": [149, 150]}, {"text": "method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["method"], "offsets": [81]}, {"text": "state - of - the - art results", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [157, 158, 159, 160, 161, 162, 163, 164]}, {"text": "high efficiency", "nugget_type": "STR", "argument_type": "Result", "tokens": ["high", "efficiency"], "offsets": [168, 169]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [156]}}], "document": ["recently", ",", "the", "retrieval", "models", "based", "on", "dense", "representations", "have", "been", "gradually", "applied", "in", "the", "first", "stage", "of", "the", "document", "retrieval", "tasks", ",", "showing", "better", "performance", "than", "traditional", "sparse", "vector", "space", "models", ".", "to", "obtain", "high", "efficiency", ",", "the", "basic", "structure", 
"of", "these", "models", "is", "bi", "-", "encoder", "in", "most", "cases", ".", "however", ",", "this", "simple", "structure", "may", "cause", "serious", "information", "loss", "during", "the", "encoding", "of", "documents", "since", "the", "queries", "are", "agnostic", ".", "to", "address", "this", "problem", ",", "we", "design", "a", "method", "to", "mimic", "the", "queries", "to", "each", "of", "the", "documents", "by", "an", "iterative", "clustering", "process", "and", "represent", "the", "documents", "by", "multiple", "pseudo", "queries", "(", "i", ".", "e", ".", ",", "the", "cluster", "centroids", ")", ".", "to", "boost", "the", "retrieval", "process", "using", "approximate", "nearest", "neighbor", "search", "library", ",", "we", "also", "optimize", "the", "matching", "function", "with", "a", "two", "-", "step", "score", "calculation", "procedure", ".", "experimental", "results", "on", "several", "popular", "ranking", "and", "qa", "datasets", "show", "that", "our", "model", "can", "achieve", "state", "-", "of", "-", "the", "-", "art", "results", "while", "still", "remaining", "high", "efficiency", "."]}, {"venue": "ACL", "title": "Is Sparse Attention more Interpretable?", "abstract": "Sparse attention has been claimed to increase model interpretability under the assumption that it highlights influential inputs. Yet the attention distribution is typically over representations internal to the model rather than the inputs themselves, suggesting this assumption may not have merit. We build on the recent work exploring the interpretability of attention; we design a set of experiments to help us understand how sparsity affects our ability to use attention as an explainability tool. On three text classification tasks, we verify that only a weak relationship between inputs and co-indexed intermediate representations exists\u2014under sparse attention and otherwise. 
Further, we do not find any plausible mappings from sparse attention distributions to a sparse set of influential inputs through other avenues. Rather, we observe in this setting that inducing sparsity may make it less plausible that attention can be used as a tool for understanding model behavior.", "doc_id": "32a63eeb1e0571b54add55e3194f08c9", "publication_year": 2021, "sentences": ["sparse attention has been claimed to increase model interpretability under the assumption that it highlights influential inputs .", "yet the attention distribution is typically over representations internal to the model rather than the inputs themselves , suggesting this assumption may not have merit .", "we build on the recent work exploring the interpretability of attention ; we design a set of experiments to help us understand how sparsity affects our ability to use attention as an explainability tool .", "on three text classification tasks , we verify that only a weak relationship between inputs and co - indexed intermediate representations exists \u2014 under sparse attention and otherwise .", "further , we do not find any plausible mappings from sparse attention distributions to a sparse set of influential inputs through other avenues .", "rather , we observe in this setting that inducing sparsity may make it less plausible that attention can be used as a tool for understanding model behavior ."], "events": [{"event_type": "ITT", "arguments": [{"text": "sparse attention", "nugget_type": "APP", "argument_type": "Target", "tokens": ["sparse", "attention"], "offsets": [0, 1]}, {"text": "under the assumption that it highlights influential inputs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "the", "assumption", "that", "sparse", "attention", "highlights", "influential", "inputs"], "offsets": [9, 10, 11, 12, 0, 1, 14, 15, 16]}], "trigger": {"text": "increase", "tokens": ["increase"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "may not 
have merit", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["may", "not", "have", "merit"], "offsets": [39, 40, 41, 42]}], "trigger": {"text": "may not have merit", "tokens": ["may", "not", "have", "merit"], "offsets": [39, 40, 41, 42]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [44]}, {"text": "recent work exploring the interpretability of attention", "nugget_type": "APP", "argument_type": "Content", "tokens": ["recent", "work", "exploring", "the", "interpretability", "of", "attention"], "offsets": [48, 49, 50, 51, 52, 53, 54]}], "trigger": {"text": "build on", "tokens": ["build", "on"], "offsets": [45, 46]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [56]}, {"text": "experiments", "nugget_type": "APP", "argument_type": "Content", "tokens": ["experiments"], "offsets": [61]}, {"text": "understand", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["understand"], "offsets": [65]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [57]}}, {"event_type": "PUR", "arguments": [{"text": "how sparsity affects our ability to use attention as an explainability tool", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["how", "sparsity", "affects", "our", "ability", "to", "use", "attention", "as", "an", "explainability", "tool"], "offsets": [66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77]}], "trigger": {"text": "understand", "tokens": ["understand"], "offsets": [65]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [85]}, {"text": "exists", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["exists"], "offsets": [100]}], "trigger": {"text": "verify", "tokens": ["verify"], "offsets": [86]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", 
"argument_type": "Finder", "tokens": ["we"], "offsets": [134]}, {"text": "may make", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["may", "make"], "offsets": [142, 143]}], "trigger": {"text": "observe", "tokens": ["observe"], "offsets": [135]}}, {"event_type": "FAC", "arguments": [{"text": "inducing sparsity", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["inducing", "sparsity"], "offsets": [140, 141]}, {"text": "less plausible", "nugget_type": "WEA", "argument_type": "Target", "tokens": ["less", "plausible"], "offsets": [145, 146]}, {"text": "attention", "nugget_type": "APP", "argument_type": "Object", "tokens": ["attention"], "offsets": [148]}], "trigger": {"text": "may make", "tokens": ["may", "make"], "offsets": [142, 143]}}, {"event_type": "FAC", "arguments": [{"text": "under sparse attention and otherwise", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "sparse", "attention", "and", "otherwise"], "offsets": [102, 103, 104, 105, 106]}, {"text": "only", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["only"], "offsets": [88]}, {"text": "weak relationship", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["weak", "relationship"], "offsets": [90, 91]}, {"text": "between inputs and co - indexed intermediate representations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "inputs", "and", "co", "-", "indexed", "intermediate", "representations"], "offsets": [92, 93, 94, 95, 96, 97, 98, 99]}], "trigger": {"text": "exists", "tokens": ["exists"], "offsets": [100]}}], "document": ["sparse", "attention", "has", "been", "claimed", "to", "increase", "model", "interpretability", "under", "the", "assumption", "that", "it", "highlights", "influential", "inputs", ".", "yet", "the", "attention", "distribution", "is", "typically", "over", "representations", "internal", "to", "the", "model", "rather", "than", "the", "inputs", "themselves", ",", "suggesting", "this", 
"assumption", "may", "not", "have", "merit", ".", "we", "build", "on", "the", "recent", "work", "exploring", "the", "interpretability", "of", "attention", ";", "we", "design", "a", "set", "of", "experiments", "to", "help", "us", "understand", "how", "sparsity", "affects", "our", "ability", "to", "use", "attention", "as", "an", "explainability", "tool", ".", "on", "three", "text", "classification", "tasks", ",", "we", "verify", "that", "only", "a", "weak", "relationship", "between", "inputs", "and", "co", "-", "indexed", "intermediate", "representations", "exists", "\u2014", "under", "sparse", "attention", "and", "otherwise", ".", "further", ",", "we", "do", "not", "find", "any", "plausible", "mappings", "from", "sparse", "attention", "distributions", "to", "a", "sparse", "set", "of", "influential", "inputs", "through", "other", "avenues", ".", "rather", ",", "we", "observe", "in", "this", "setting", "that", "inducing", "sparsity", "may", "make", "it", "less", "plausible", "that", "attention", "can", "be", "used", "as", "a", "tool", "for", "understanding", "model", "behavior", "."]}, {"venue": "ACL", "title": "MedNLI Is Not Immune: Natural Language Inference Artifacts in the Clinical Domain", "abstract": "Crowdworker-constructed natural language inference (NLI) datasets have been found to contain statistical artifacts associated with the annotation process that allow hypothesis-only classifiers to achieve better-than-random performance (CITATION). We investigate whether MedNLI, a physician-annotated dataset with premises extracted from clinical notes, contains such artifacts (CITATION). We find that entailed hypotheses contain generic versions of specific concepts in the premise, as well as modifiers related to responsiveness, duration, and probability. Neutral hypotheses feature conditions and behaviors that co-occur with, or cause, the condition(s) in the premise. 
Contradiction hypotheses feature explicit negation of the premise and implicit negation via assertion of good health. Adversarial filtering demonstrates that performance degrades when evaluated on the difficult subset. We provide partition information and recommendations for alternative dataset construction strategies for knowledge-intensive domains.", "doc_id": "cd913053a009459dd1155dfd181a1260", "publication_year": 2021, "sentences": ["crowdworker - constructed natural language inference ( nli ) datasets have been found to contain statistical artifacts associated with the annotation process that allow hypothesis - only classifiers to achieve better - than - random performance ( citation ) .", "we investigate whether mednli , a physician - annotated dataset with premises extracted from clinical notes , contains such artifacts ( citation ) .", "we find that entailed hypotheses contain generic versions of specific concepts in the premise , as well as modifiers related to responsiveness , duration , and probability .", "neutral hypotheses feature conditions and behaviors that co - occur with , or cause , the condition ( s ) in the premise .", "contradiction hypotheses feature explicit negation of the premise and implicit negation via assertion of good health .", "adversarial filtering demonstrates that performance degrades when evaluated on the difficult subset .", "we provide partition information and recommendations for alternative dataset construction strategies for knowledge - intensive domains ."], "events": [{"event_type": "ITT", "arguments": [{"text": "statistical artifacts", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["statistical", "artifacts"], "offsets": [15, 16]}], "trigger": {"text": "contain", "tokens": ["contain"], "offsets": [14]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [40]}, {"text": "mednli", "nugget_type": "DST", "argument_type": 
"Content", "tokens": ["mednli"], "offsets": [43]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [41]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [64]}, {"text": "contain", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["contain"], "offsets": [69]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [65]}}, {"event_type": "FAC", "arguments": [{"text": "entailed hypotheses", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["entailed", "hypotheses"], "offsets": [67, 68]}, {"text": "modifiers", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["modifiers"], "offsets": [82]}, {"text": "generic versions of specific concepts", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["generic", "versions", "of", "specific", "concepts"], "offsets": [70, 71, 72, 73, 74]}, {"text": "in the premise", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "premise"], "offsets": [75, 76, 77]}, {"text": "related to responsiveness", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["related", "to", "responsiveness"], "offsets": [83, 84, 85]}, {"text": "duration", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["duration"], "offsets": [87]}, {"text": "probability", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["probability"], "offsets": [90]}], "trigger": {"text": "contain", "tokens": ["contain"], "offsets": [69]}}, {"event_type": "FAC", "arguments": [{"text": "neutral hypotheses", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["neutral", "hypotheses"], "offsets": [92, 93]}, {"text": "conditions and behaviors", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["conditions", "and", "behaviors"], "offsets": [95, 96, 97]}, {"text": "that co - occur with , or cause , the condition ( s ) in the premise", "nugget_type": "LIM", 
"argument_type": "Condition", "tokens": ["that", "co", "-", "occur", "with", "or", "cause", "the", "condition", "in", "the", "premise"], "offsets": [98, 99, 100, 101, 102, 104, 105, 107, 108, 112, 113, 114]}], "trigger": {"text": "feature", "tokens": ["feature"], "offsets": [94]}}, {"event_type": "FAC", "arguments": [{"text": "contradiction hypotheses", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["contradiction", "hypotheses"], "offsets": [116, 117]}, {"text": "implicit negation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["implicit", "negation"], "offsets": [125, 126]}, {"text": "explicit negation of the premise", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["explicit", "negation", "of", "the", "premise"], "offsets": [119, 120, 121, 122, 123]}, {"text": "via assertion of good health", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "assertion", "of", "good", "health"], "offsets": [127, 128, 129, 130, 131]}], "trigger": {"text": "feature", "tokens": ["feature"], "offsets": [118]}}, {"event_type": "FAC", "arguments": [{"text": "performance", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["performance"], "offsets": [137]}, {"text": "when evaluated on the difficult subset", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "evaluated", "on", "the", "difficult", "subset"], "offsets": [139, 140, 141, 142, 143, 144]}], "trigger": {"text": "degrades", "tokens": ["degrades"], "offsets": [138]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [146]}, {"text": "alternative dataset construction strategies", "nugget_type": "APP", "argument_type": "Target", "tokens": ["alternative", "dataset", "construction", "strategies"], "offsets": [153, 154, 155, 156]}, {"text": "partition information", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["partition", "information"], "offsets": 
[148, 149]}, {"text": "recommendations", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["recommendations"], "offsets": [151]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [147]}}, {"event_type": "FIN", "arguments": [{"text": "degrades", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["degrades"], "offsets": [138]}], "trigger": {"text": "demonstrates", "tokens": ["demonstrates"], "offsets": [135]}}], "document": ["crowdworker", "-", "constructed", "natural", "language", "inference", "(", "nli", ")", "datasets", "have", "been", "found", "to", "contain", "statistical", "artifacts", "associated", "with", "the", "annotation", "process", "that", "allow", "hypothesis", "-", "only", "classifiers", "to", "achieve", "better", "-", "than", "-", "random", "performance", "(", "citation", ")", ".", "we", "investigate", "whether", "mednli", ",", "a", "physician", "-", "annotated", "dataset", "with", "premises", "extracted", "from", "clinical", "notes", ",", "contains", "such", "artifacts", "(", "citation", ")", ".", "we", "find", "that", "entailed", "hypotheses", "contain", "generic", "versions", "of", "specific", "concepts", "in", "the", "premise", ",", "as", "well", "as", "modifiers", "related", "to", "responsiveness", ",", "duration", ",", "and", "probability", ".", "neutral", "hypotheses", "feature", "conditions", "and", "behaviors", "that", "co", "-", "occur", "with", ",", "or", "cause", ",", "the", "condition", "(", "s", ")", "in", "the", "premise", ".", "contradiction", "hypotheses", "feature", "explicit", "negation", "of", "the", "premise", "and", "implicit", "negation", "via", "assertion", "of", "good", "health", ".", "adversarial", "filtering", "demonstrates", "that", "performance", "degrades", "when", "evaluated", "on", "the", "difficult", "subset", ".", "we", "provide", "partition", "information", "and", "recommendations", "for", "alternative", "dataset", "construction", "strategies", "for", "knowledge", "-", 
"intensive", "domains", "."]}, {"venue": "ACL", "title": "A Transformer-based Approach for Source Code Summarization", "abstract": "Generating a readable summary that describes the functionality of a program is known as source code summarization. In this task, learning code representation by modeling the pairwise relationship between code tokens to capture their long-range dependencies is crucial. To learn code representation for summarization, we explore the Transformer model that uses a self-attention mechanism and has shown to be effective in capturing long-range dependencies. In this work, we show that despite the approach is simple, it outperforms the state-of-the-art techniques by a significant margin. We perform extensive analysis and ablation studies that reveal several important findings, e.g., the absolute encoding of source code tokens\u2019 position hinders, while relative encoding significantly improves the summarization performance. We have made our code publicly available to facilitate future research.", "doc_id": "9b8e57f3f4734b494281b19e7b89f3b0", "publication_year": 2020, "sentences": ["generating a readable summary that describes the functionality of a program is known as source code summarization .", "in this task , learning code representation by modeling the pairwise relationship between code tokens to capture their long - range dependencies is crucial .", "to learn code representation for summarization , we explore the transformer model that uses a self - attention mechanism and has shown to be effective in capturing long - range dependencies .", "in this work , we show that despite the approach is simple , it outperforms the state - of - the - art techniques by a significant margin .", "we perform extensive analysis and ablation studies that reveal several important findings , e . g . 
, the absolute encoding of source code tokens \u2019 position hinders , while relative encoding significantly improves the summarization performance .", "we have made our code publicly available to facilitate future research ."], "events": [{"event_type": "ITT", "arguments": [{"text": "source code summarization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["source", "code", "summarization"], "offsets": [14, 15, 16]}], "trigger": {"text": "known", "tokens": ["known"], "offsets": [12]}}, {"event_type": "PRP", "arguments": [{"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [44]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [50]}, {"text": "transformer model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["transformer", "model"], "offsets": [53, 54]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [51]}}, {"event_type": "PUR", "arguments": [{"text": "code representation for summarization", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["code", "representation", "for", "summarization"], "offsets": [45, 46, 47, 48]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [44]}}, {"event_type": "MDS", "arguments": [{"text": "self - attention mechanism", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["self", "-", "attention", "mechanism"], "offsets": [58, 59, 60, 61]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [56]}}, {"event_type": "FAC", "arguments": [{"text": "in capturing long - range dependencies", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "capturing", "long", "-", "range", "dependencies"], "offsets": [68, 69, 70, 71, 72, 73]}, {"text": "transformer model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["transformer", "model"], "offsets": [53, 54]}, {"text": "effective", "nugget_type": "TAK", "argument_type": "Object", 
"tokens": ["effective"], "offsets": [67]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [64]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [89]}, {"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [79]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [80]}}, {"event_type": "CMP", "arguments": [{"text": "transformer model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["transformer", "model"], "offsets": [53, 54]}, {"text": "state - of - the - art techniques", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "techniques"], "offsets": [91, 92, 93, 94, 95, 96, 97, 98]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [89]}, {"text": "significant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant"], "offsets": [101]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [89]}}, {"event_type": "FAC", "arguments": [{"text": "absolute encoding of source code tokens \u2019 position", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["absolute", "encoding", "of", "source", "code", "tokens", "\u2019", "position"], "offsets": [123, 124, 125, 126, 127, 128, 129, 130]}], "trigger": {"text": "hinders", "tokens": ["hinders"], "offsets": [131]}}, {"event_type": "CMP", "arguments": [{"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [136]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [137]}, {"text": "summarization performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["summarization", "performance"], "offsets": [139, 140]}], "trigger": {"text": "improves", "tokens": ["improves"], 
"offsets": [137]}}], "document": ["generating", "a", "readable", "summary", "that", "describes", "the", "functionality", "of", "a", "program", "is", "known", "as", "source", "code", "summarization", ".", "in", "this", "task", ",", "learning", "code", "representation", "by", "modeling", "the", "pairwise", "relationship", "between", "code", "tokens", "to", "capture", "their", "long", "-", "range", "dependencies", "is", "crucial", ".", "to", "learn", "code", "representation", "for", "summarization", ",", "we", "explore", "the", "transformer", "model", "that", "uses", "a", "self", "-", "attention", "mechanism", "and", "has", "shown", "to", "be", "effective", "in", "capturing", "long", "-", "range", "dependencies", ".", "in", "this", "work", ",", "we", "show", "that", "despite", "the", "approach", "is", "simple", ",", "it", "outperforms", "the", "state", "-", "of", "-", "the", "-", "art", "techniques", "by", "a", "significant", "margin", ".", "we", "perform", "extensive", "analysis", "and", "ablation", "studies", "that", "reveal", "several", "important", "findings", ",", "e", ".", "g", ".", ",", "the", "absolute", "encoding", "of", "source", "code", "tokens", "\u2019", "position", "hinders", ",", "while", "relative", "encoding", "significantly", "improves", "the", "summarization", "performance", ".", "we", "have", "made", "our", "code", "publicly", "available", "to", "facilitate", "future", "research", "."]}, {"venue": "ACL", "title": "Temporally-Informed Analysis of Named Entity Recognition", "abstract": "Natural language processing models often have to make predictions on text data that evolves over time as a result of changes in language use or the information described in the text. However, evaluation results on existing data sets are seldom reported by taking the timestamp of the document into account. We analyze and propose methods that make better use of temporally-diverse training data, with a focus on the task of named entity recognition. 
To support these experiments, we introduce a novel data set of English tweets annotated with named entities. We empirically demonstrate the effect of temporal drift on performance, and how the temporal information of documents can be used to obtain better models compared to those that disregard temporal information. Our analysis gives insights into why this information is useful, in the hope of informing potential avenues of improvement for named entity recognition as well as other NLP tasks under similar experimental setups.", "doc_id": "994f10634820d66c49d24dcffe6ec7f0", "publication_year": 2020, "sentences": ["natural language processing models often have to make predictions on text data that evolves over time as a result of changes in language use or the information described in the text .", "however , evaluation results on existing data sets are seldom reported by taking the timestamp of the document into account .", "we analyze and propose methods that make better use of temporally - diverse training data , with a focus on the task of named entity recognition .", "to support these experiments , we introduce a novel data set of english tweets annotated with named entities .", "we empirically demonstrate the effect of temporal drift on performance , and how the temporal information of documents can be used to obtain better models compared to those that disregard temporal information .", "our analysis gives insights into why this information is useful , in the hope of informing potential avenues of improvement for named entity recognition as well as other nlp tasks under similar experimental setups ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language processing models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["natural", "language", "processing", "models"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "often have to make", "tokens": ["often", "have", "to", "make"], "offsets": [4, 5, 6, 7]}}, {"event_type": "RWF", 
"arguments": [{"text": "existing data sets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["existing", "data", "sets"], "offsets": [37, 38, 39]}, {"text": "seldom reported", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["seldom", "reported"], "offsets": [41, 42]}, {"text": "taking the timestamp of the document into account", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["taking", "the", "timestamp", "of", "the", "document", "into", "account"], "offsets": [44, 45, 46, 47, 48, 49, 50, 51]}], "trigger": {"text": "seldom reported", "tokens": ["seldom", "reported"], "offsets": [41, 42]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [53]}, {"text": "methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["methods"], "offsets": [57]}, {"text": "make better use", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["make", "better", "use"], "offsets": [59, 60, 61]}], "trigger": {"text": "analyze and propose", "tokens": ["analyze", "and", "propose"], "offsets": [54, 55, 56]}}, {"event_type": "PUR", "arguments": [{"text": "temporally - diverse training data", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["temporally", "-", "diverse", "training", "data"], "offsets": [63, 64, 65, 66, 67]}], "trigger": {"text": "make better use", "tokens": ["make", "better", "use"], "offsets": [59, 60, 61]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [85]}, {"text": "data set of english tweets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["data", "set", "of", "english", "tweets"], "offsets": [89, 90, 91, 92, 93]}, {"text": "support", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["support"], "offsets": [81]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [86]}}, {"event_type": "PUR", 
"arguments": [{"text": "these experiments", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["these", "experiments"], "offsets": [82, 83]}], "trigger": {"text": "support", "tokens": ["support"], "offsets": [81]}}, {"event_type": "FAC", "arguments": [{"text": "effect of temporal drift", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["effect", "of", "temporal", "drift"], "offsets": [103, 104, 105, 106]}, {"text": "on performance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "performance"], "offsets": [107, 108]}], "trigger": {"text": "empirically demonstrate", "tokens": ["empirically", "demonstrate"], "offsets": [100, 101]}}, {"event_type": "CMP", "arguments": [{"text": "temporal information of documents can be used", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["temporal", "information", "of", "documents", "can", "be", "used"], "offsets": [113, 114, 115, 116, 117, 118, 119]}, {"text": "those that disregard temporal information", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["those", "that", "disregard", "temporal", "information"], "offsets": [126, 127, 128, 129, 130]}, {"text": "models", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["models"], "offsets": [123]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [122]}], "trigger": {"text": "obtain", "tokens": ["obtain"], "offsets": [121]}}, {"event_type": "WKS", "arguments": [{"text": "insights", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["insights"], "offsets": [135]}, {"text": "informing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["informing"], "offsets": [147]}, {"text": "information", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["information"], "offsets": [139]}], "trigger": {"text": "gives", "tokens": ["gives"], "offsets": [134]}}, {"event_type": "PUR", "arguments": [{"text": "potential avenues of improvement", 
"nugget_type": "APP", "argument_type": "Aim", "tokens": ["potential", "avenues", "of", "improvement"], "offsets": [148, 149, 150, 151]}], "trigger": {"text": "informing", "tokens": ["informing"], "offsets": [147]}}], "document": ["natural", "language", "processing", "models", "often", "have", "to", "make", "predictions", "on", "text", "data", "that", "evolves", "over", "time", "as", "a", "result", "of", "changes", "in", "language", "use", "or", "the", "information", "described", "in", "the", "text", ".", "however", ",", "evaluation", "results", "on", "existing", "data", "sets", "are", "seldom", "reported", "by", "taking", "the", "timestamp", "of", "the", "document", "into", "account", ".", "we", "analyze", "and", "propose", "methods", "that", "make", "better", "use", "of", "temporally", "-", "diverse", "training", "data", ",", "with", "a", "focus", "on", "the", "task", "of", "named", "entity", "recognition", ".", "to", "support", "these", "experiments", ",", "we", "introduce", "a", "novel", "data", "set", "of", "english", "tweets", "annotated", "with", "named", "entities", ".", "we", "empirically", "demonstrate", "the", "effect", "of", "temporal", "drift", "on", "performance", ",", "and", "how", "the", "temporal", "information", "of", "documents", "can", "be", "used", "to", "obtain", "better", "models", "compared", "to", "those", "that", "disregard", "temporal", "information", ".", "our", "analysis", "gives", "insights", "into", "why", "this", "information", "is", "useful", ",", "in", "the", "hope", "of", "informing", "potential", "avenues", "of", "improvement", "for", "named", "entity", "recognition", "as", "well", "as", "other", "nlp", "tasks", "under", "similar", "experimental", "setups", "."]}, {"venue": "ACL", "title": "ERASER: A Benchmark to Evaluate Rationalized NLP Models", "abstract": "State-of-the-art models in NLP are now predominantly based on deep neural networks that are opaque in terms of how they come to make predictions. 
This limitation has increased interest in designing more interpretable deep models for NLP that reveal the \u2018reasoning\u2019 behind model outputs. But work in this direction has been conducted on different datasets and tasks with correspondingly unique aims and metrics; this makes it difficult to track progress. We propose the Evaluating Rationales And Simple English Reasoning (ERASER a benchmark to advance research on interpretable models in NLP. This benchmark comprises multiple datasets and tasks for which human annotations of \u201crationales\u201d (supporting evidence) have been collected. We propose several metrics that aim to capture how well the rationales provided by models align with human rationales, and also how faithful these rationales are (i.e., the degree to which provided rationales influenced the corresponding predictions). Our hope is that releasing this benchmark facilitates progress on designing more interpretable NLP systems. The benchmark, code, and documentation are available at https://www.eraserbenchmark.com/", "doc_id": "a184c588cfd0c7ae463e8b974b4acf0a", "publication_year": 2020, "sentences": ["state - of - the - art models in nlp are now predominantly based on deep neural networks that are opaque in terms of how they come to make predictions .", "this limitation has increased interest in designing more interpretable deep models for nlp that reveal the \u2018 reasoning \u2019 behind model outputs .", "but work in this direction has been conducted on different datasets and tasks with correspondingly unique aims and metrics ; this makes it difficult to track progress .", "we propose the evaluating rationales and simple english reasoning ( eraser a benchmark to advance research on interpretable models in nlp .", "this benchmark comprises multiple datasets and tasks for which human annotations of \u201c rationales \u201d ( supporting evidence ) have been collected .", "we propose several metrics that aim to capture how well the 
rationales provided by models align with human rationales , and also how faithful these rationales are ( i . e . , the degree to which provided rationales influenced the corresponding predictions ) .", "our hope is that releasing this benchmark facilitates progress on designing more interpretable nlp systems .", "the benchmark , code , and documentation are available at https : / / www . eraserbenchmark . com /"], "events": [{"event_type": "ITT", "arguments": [{"text": "nlp", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp"], "offsets": [9]}], "trigger": {"text": "based", "tokens": ["based"], "offsets": [13]}}, {"event_type": "RWF", "arguments": [{"text": "track", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["track"], "offsets": [79]}, {"text": "difficult", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["difficult"], "offsets": [77]}], "trigger": {"text": "makes", "tokens": ["makes"], "offsets": [75]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [82]}, {"text": "evaluating rationales and simple english reasoning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["evaluating", "rationales", "and", "simple", "english", "reasoning"], "offsets": [85, 86, 87, 88, 89, 90]}, {"text": "advance", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["advance"], "offsets": [96]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [83]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [127]}, {"text": "several metrics", "nugget_type": "APP", "argument_type": "Content", "tokens": ["several", "metrics"], "offsets": [129, 130]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [134]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [128]}}, {"event_type": 
"PUR", "arguments": [{"text": "how well the rationales", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["how", "well", "the", "rationales"], "offsets": [135, 136, 137, 138]}, {"text": "how faithful these rationales", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["how", "faithful", "these", "rationales"], "offsets": [149, 150, 151, 152]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [134]}}, {"event_type": "RWF", "arguments": [{"text": "in terms of how they come to make predictions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "terms", "of", "how", "they", "come", "to", "make", "predictions"], "offsets": [21, 22, 23, 24, 25, 26, 27, 28, 29]}, {"text": "opaque", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["opaque"], "offsets": [20]}], "trigger": {"text": "opaque", "tokens": ["opaque"], "offsets": [20]}}, {"event_type": "RWF", "arguments": [{"text": "work in this direction", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["work", "in", "this", "direction"], "offsets": [55, 56, 57, 58]}, {"text": "on different datasets and tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "different", "datasets", "and", "tasks"], "offsets": [62, 63, 64, 65, 66]}, {"text": "unique aims", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unique", "aims"], "offsets": [69, 70]}, {"text": "unique metrics", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unique", "metrics"], "offsets": [69, 72]}], "trigger": {"text": "conducted", "tokens": ["conducted"], "offsets": [61]}}, {"event_type": "PUR", "arguments": [{"text": "progress", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["progress"], "offsets": [80]}], "trigger": {"text": "track", "tokens": ["track"], "offsets": [79]}}, {"event_type": "PUR", "arguments": [{"text": "research on interpretable models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["research", "on", 
"interpretable", "models"], "offsets": [97, 98, 99, 100]}, {"text": "in nlp", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "nlp"], "offsets": [101, 102]}], "trigger": {"text": "advance", "tokens": ["advance"], "offsets": [96]}}], "document": ["state", "-", "of", "-", "the", "-", "art", "models", "in", "nlp", "are", "now", "predominantly", "based", "on", "deep", "neural", "networks", "that", "are", "opaque", "in", "terms", "of", "how", "they", "come", "to", "make", "predictions", ".", "this", "limitation", "has", "increased", "interest", "in", "designing", "more", "interpretable", "deep", "models", "for", "nlp", "that", "reveal", "the", "\u2018", "reasoning", "\u2019", "behind", "model", "outputs", ".", "but", "work", "in", "this", "direction", "has", "been", "conducted", "on", "different", "datasets", "and", "tasks", "with", "correspondingly", "unique", "aims", "and", "metrics", ";", "this", "makes", "it", "difficult", "to", "track", "progress", ".", "we", "propose", "the", "evaluating", "rationales", "and", "simple", "english", "reasoning", "(", "eraser", "a", "benchmark", "to", "advance", "research", "on", "interpretable", "models", "in", "nlp", ".", "this", "benchmark", "comprises", "multiple", "datasets", "and", "tasks", "for", "which", "human", "annotations", "of", "\u201c", "rationales", "\u201d", "(", "supporting", "evidence", ")", "have", "been", "collected", ".", "we", "propose", "several", "metrics", "that", "aim", "to", "capture", "how", "well", "the", "rationales", "provided", "by", "models", "align", "with", "human", "rationales", ",", "and", "also", "how", "faithful", "these", "rationales", "are", "(", "i", ".", "e", ".", ",", "the", "degree", "to", "which", "provided", "rationales", "influenced", "the", "corresponding", "predictions", ")", ".", "our", "hope", "is", "that", "releasing", "this", "benchmark", "facilitates", "progress", "on", "designing", "more", "interpretable", "nlp", "systems", ".", "the", "benchmark", ",", 
"code", ",", "and", "documentation", "are", "available", "at", "https", ":", "/", "/", "www", ".", "eraserbenchmark", ".", "com", "/"]}, {"venue": "ACL", "title": "The SOFC-Exp Corpus and Neural Approaches to Information Extraction in the Materials Science Domain", "abstract": "This paper presents a new challenging information extraction task in the domain of materials science. We develop an annotation scheme for marking information on experiments related to solid oxide fuel cells in scientific publications, such as involved materials and measurement conditions. With this paper, we publish our annotation guidelines, as well as our SOFC-Exp corpus consisting of 45 open-access scholarly articles annotated by domain experts. A corpus and an inter-annotator agreement study demonstrate the complexity of the suggested named entity recognition and slot filling tasks as well as high annotation quality. We also present strong neural-network based models for a variety of tasks that can be addressed on the basis of our new data set. On all tasks, using BERT embeddings leads to large performance gains, but with increasing task complexity, adding a recurrent neural network on top seems beneficial. 
Our models will serve as competitive baselines in future work, and analysis of their performance highlights difficult cases when modeling the data and suggests promising research directions.", "doc_id": "4f34d2d906a166a193eae5b49634d4f6", "publication_year": 2020, "sentences": ["this paper presents a new challenging information extraction task in the domain of materials science .", "we develop an annotation scheme for marking information on experiments related to solid oxide fuel cells in scientific publications , such as involved materials and measurement conditions .", "with this paper , we publish our annotation guidelines , as well as our sofc - exp corpus consisting of 45 open - access scholarly articles annotated by domain experts .", "a corpus and an inter - annotator agreement study demonstrate the complexity of the suggested named entity recognition and slot filling tasks as well as high annotation quality .", "we also present strong neural - network based models for a variety of tasks that can be addressed on the basis of our new data set .", "on all tasks , using bert embeddings leads to large performance gains , but with increasing task complexity , adding a recurrent neural network on top seems beneficial .", "our models will serve as competitive baselines in future work , and analysis of their performance highlights difficult cases when modeling the data and suggests promising research directions ."], "events": [{"event_type": "WKS", "arguments": [{"text": "in the domain of materials science", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "domain", "of", "materials", "science"], "offsets": [9, 10, 11, 12, 13, 14]}, {"text": "new challenging information extraction task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["new", "challenging", "information", "extraction", "task"], "offsets": [4, 5, 6, 7, 8]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [2]}}, {"event_type": "PRP", 
"arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [16]}, {"text": "marking", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["marking"], "offsets": [22]}, {"text": "annotation scheme", "nugget_type": "APP", "argument_type": "Content", "tokens": ["annotation", "scheme"], "offsets": [19, 20]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [17]}}, {"event_type": "PUR", "arguments": [{"text": "information on experiments related to solid oxide fuel cells", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["information", "on", "experiments", "related", "to", "solid", "oxide", "fuel", "cells"], "offsets": [23, 24, 25, 26, 27, 28, 29, 30, 31]}, {"text": "in scientific publications", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "scientific", "publications"], "offsets": [32, 33, 34]}], "trigger": {"text": "marking", "tokens": ["marking"], "offsets": [22]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [48]}, {"text": "annotation guidelines", "nugget_type": "APP", "argument_type": "Content", "tokens": ["annotation", "guidelines"], "offsets": [51, 52]}, {"text": "sofc - exp corpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["sofc", "-", "exp", "corpus"], "offsets": [58, 59, 60, 61]}], "trigger": {"text": "publish", "tokens": ["publish"], "offsets": [49]}}, {"event_type": "FAC", "arguments": [{"text": "complexity of the suggested named entity recognition and slot filling tasks", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["complexity", "of", "the", "suggested", "named", "entity", "recognition", "and", "slot", "filling", "tasks"], "offsets": [86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96]}, {"text": "high annotation quality", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["high", "annotation", "quality"], "offsets": [100, 101, 
102]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [84]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [104]}, {"text": "strong neural - network based models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["strong", "neural", "-", "network", "based", "models"], "offsets": [107, 108, 109, 110, 111, 112]}, {"text": "variety of tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["variety", "of", "tasks"], "offsets": [115, 116, 117]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "large performance gains", "nugget_type": "STR", "argument_type": "Object", "tokens": ["large", "performance", "gains"], "offsets": [140, 141, 142]}, {"text": "bert embeddings", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["bert", "embeddings"], "offsets": [136, 137]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [138]}}, {"event_type": "FAC", "arguments": [{"text": "with increasing task complexity", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "increasing", "task", "complexity"], "offsets": [145, 146, 147, 148]}, {"text": "adding a recurrent neural network on top", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["adding", "a", "recurrent", "neural", "network", "on", "top"], "offsets": [150, 151, 152, 153, 154, 155, 156]}, {"text": "beneficial", "nugget_type": "STR", "argument_type": "Object", "tokens": ["beneficial"], "offsets": [158]}], "trigger": {"text": "seems", "tokens": ["seems"], "offsets": [157]}}], "document": ["this", "paper", "presents", "a", "new", "challenging", "information", "extraction", "task", "in", "the", "domain", "of", "materials", "science", ".", "we", "develop", "an", "annotation", "scheme", "for", "marking", "information", "on", "experiments", "related", "to", "solid", "oxide", 
"fuel", "cells", "in", "scientific", "publications", ",", "such", "as", "involved", "materials", "and", "measurement", "conditions", ".", "with", "this", "paper", ",", "we", "publish", "our", "annotation", "guidelines", ",", "as", "well", "as", "our", "sofc", "-", "exp", "corpus", "consisting", "of", "45", "open", "-", "access", "scholarly", "articles", "annotated", "by", "domain", "experts", ".", "a", "corpus", "and", "an", "inter", "-", "annotator", "agreement", "study", "demonstrate", "the", "complexity", "of", "the", "suggested", "named", "entity", "recognition", "and", "slot", "filling", "tasks", "as", "well", "as", "high", "annotation", "quality", ".", "we", "also", "present", "strong", "neural", "-", "network", "based", "models", "for", "a", "variety", "of", "tasks", "that", "can", "be", "addressed", "on", "the", "basis", "of", "our", "new", "data", "set", ".", "on", "all", "tasks", ",", "using", "bert", "embeddings", "leads", "to", "large", "performance", "gains", ",", "but", "with", "increasing", "task", "complexity", ",", "adding", "a", "recurrent", "neural", "network", "on", "top", "seems", "beneficial", ".", "our", "models", "will", "serve", "as", "competitive", "baselines", "in", "future", "work", ",", "and", "analysis", "of", "their", "performance", "highlights", "difficult", "cases", "when", "modeling", "the", "data", "and", "suggests", "promising", "research", "directions", "."]}, {"venue": "ACL", "title": "HateCheck: Functional Tests for Hate Speech Detection Models", "abstract": "Detecting online hate is a difficult task that even state-of-the-art models struggle with. Typically, hate speech detection models are evaluated by measuring their performance on held-out test data using metrics such as accuracy and F1 score. However, this approach makes it difficult to identify specific model weak points. It also risks overestimating generalisable model performance due to increasingly well-evidenced systematic gaps and biases in hate speech datasets. 
To enable more targeted diagnostic insights, we introduce HateCheck, a suite of functional tests for hate speech detection models. We specify 29 model functionalities motivated by a review of previous research and a series of interviews with civil society stakeholders. We craft test cases for each functionality and validate their quality through a structured annotation process. To illustrate HateCheck\u2019s utility, we test near-state-of-the-art transformer models as well as two popular commercial models, revealing critical model weaknesses.", "doc_id": "a7650e539f0a17e41f88e0d467a54aa1", "publication_year": 2021, "sentences": ["detecting online hate is a difficult task that even state - of - the - art models struggle with .", "typically , hate speech detection models are evaluated by measuring their performance on held - out test data using metrics such as accuracy and f1 score .", "however , this approach makes it difficult to identify specific model weak points .", "it also risks overestimating generalisable model performance due to increasingly well - evidenced systematic gaps and biases in hate speech datasets .", "to enable more targeted diagnostic insights , we introduce hatecheck , a suite of functional tests for hate speech detection models .", "we specify 29 model functionalities motivated by a review of previous research and a series of interviews with civil society stakeholders .", "we craft test cases for each functionality and validate their quality through a structured annotation process .", "to illustrate hatecheck \u2019 s utility , we test near - state - of - the - art transformer models as well as two popular commercial models , revealing critical model weaknesses ."], "events": [{"event_type": "PUR", "arguments": [{"text": "hatecheck \u2019 s utility", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["hatecheck", "\u2019", "s", "utility"], "offsets": [146, 147, 148, 149]}], "trigger": {"text": "illustrate", "tokens": ["illustrate"], 
"offsets": [145]}}, {"event_type": "RWF", "arguments": [{"text": "hate speech detection models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["hate", "speech", "detection", "models"], "offsets": [22, 23, 24, 25]}, {"text": "difficult to identify", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["difficult", "to", "identify"], "offsets": [53, 54, 55]}], "trigger": {"text": "difficult to identify", "tokens": ["difficult", "to", "identify"], "offsets": [53, 54, 55]}}, {"event_type": "RWF", "arguments": [{"text": "hate speech detection models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["hate", "speech", "detection", "models"], "offsets": [22, 23, 24, 25]}, {"text": "overestimating", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["overestimating"], "offsets": [64]}], "trigger": {"text": "overestimating", "tokens": ["overestimating"], "offsets": [64]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [90]}, {"text": "hatecheck", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["hatecheck"], "offsets": [92]}, {"text": "hate speech detection models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["hate", "speech", "detection", "models"], "offsets": [100, 101, 102, 103]}, {"text": "enable", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enable"], "offsets": [84]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [91]}}, {"event_type": "PUR", "arguments": [{"text": "more targeted diagnostic insights", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["more", "targeted", "diagnostic", "insights"], "offsets": [85, 86, 87, 88]}], "trigger": {"text": "enable", "tokens": ["enable"], "offsets": [84]}}, {"event_type": "WKS", "arguments": [{"text": "previous research", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["previous", "research"], "offsets": [115, 116]}, 
{"text": "series of interviews", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["series", "of", "interviews"], "offsets": [119, 120, 121]}, {"text": "with civil society stakeholders", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "civil", "society", "stakeholders"], "offsets": [122, 123, 124, 125]}, {"text": "specify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["specify"], "offsets": [106]}], "trigger": {"text": "review", "tokens": ["review"], "offsets": [113]}}, {"event_type": "PUR", "arguments": [{"text": "29 model functionalities", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["29", "model", "functionalities"], "offsets": [107, 108, 109]}], "trigger": {"text": "specify", "tokens": ["specify"], "offsets": [106]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [127]}, {"text": "test cases", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["test", "cases"], "offsets": [129, 130]}, {"text": "each functionality", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["each", "functionality"], "offsets": [132, 133]}], "trigger": {"text": "craft", "tokens": ["craft"], "offsets": [128]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [127]}, {"text": "their quality", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["test", "cases", "quality"], "offsets": [129, 130, 137]}, {"text": "through a structured annotation process", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "a", "structured", "annotation", "process"], "offsets": [138, 139, 140, 141, 142]}], "trigger": {"text": "validate", "tokens": ["validate"], "offsets": [135]}}, {"event_type": "WKS", "arguments": [{"text": "illustrate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["illustrate"], "offsets": 
[145]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [151]}, {"text": "near - state - of - the - art transformer models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["near", "-", "state", "-", "of", "-", "the", "-", "art", "transformer", "models"], "offsets": [153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163]}, {"text": "two popular commercial models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "popular", "commercial", "models"], "offsets": [167, 168, 169, 170]}, {"text": "revealing critical model weaknesses", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["revealing", "critical", "model", "weaknesses"], "offsets": [172, 173, 174, 175]}], "trigger": {"text": "test", "tokens": ["test"], "offsets": [152]}}, {"event_type": "ITT", "arguments": [{"text": "hate speech detection models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["hate", "speech", "detection", "models"], "offsets": [22, 23, 24, 25]}], "trigger": {"text": "evaluated", "tokens": ["evaluated"], "offsets": [27]}}], "document": ["detecting", "online", "hate", "is", "a", "difficult", "task", "that", "even", "state", "-", "of", "-", "the", "-", "art", "models", "struggle", "with", ".", "typically", ",", "hate", "speech", "detection", "models", "are", "evaluated", "by", "measuring", "their", "performance", "on", "held", "-", "out", "test", "data", "using", "metrics", "such", "as", "accuracy", "and", "f1", "score", ".", "however", ",", "this", "approach", "makes", "it", "difficult", "to", "identify", "specific", "model", "weak", "points", ".", "it", "also", "risks", "overestimating", "generalisable", "model", "performance", "due", "to", "increasingly", "well", "-", "evidenced", "systematic", "gaps", "and", "biases", "in", "hate", "speech", "datasets", ".", "to", "enable", "more", "targeted", "diagnostic", "insights", ",", "we", "introduce", "hatecheck", ",", "a", "suite", "of", 
"functional", "tests", "for", "hate", "speech", "detection", "models", ".", "we", "specify", "29", "model", "functionalities", "motivated", "by", "a", "review", "of", "previous", "research", "and", "a", "series", "of", "interviews", "with", "civil", "society", "stakeholders", ".", "we", "craft", "test", "cases", "for", "each", "functionality", "and", "validate", "their", "quality", "through", "a", "structured", "annotation", "process", ".", "to", "illustrate", "hatecheck", "\u2019", "s", "utility", ",", "we", "test", "near", "-", "state", "-", "of", "-", "the", "-", "art", "transformer", "models", "as", "well", "as", "two", "popular", "commercial", "models", ",", "revealing", "critical", "model", "weaknesses", "."]}, {"venue": "ACL", "title": "KQA Pro: A Dataset with Explicit Compositional Programs for Complex Question Answering over Knowledge Base", "abstract": "Complex question answering over knowledge base (Complex KBQA) is challenging because it requires various compositional reasoning capabilities, such as multi-hop inference, attribute comparison, set operation, etc. Existing benchmarks have some shortcomings that limit the development of Complex KBQA: 1) they only provide QA pairs without explicit reasoning processes; 2) questions are poor in diversity or scale. To this end, we introduce KQA Pro, a dataset for Complex KBQA including around 120K diverse natural language questions. We introduce a compositional and interpretable programming language KoPL to represent the reasoning process of complex questions. For each question, we provide the corresponding KoPL program and SPARQL query, so that KQA Pro can serve for both KBQA and semantic parsing tasks. Experimental results show that state-of-the-art KBQA methods cannot achieve promising results on KQA Pro as on current datasets, which suggests that KQA Pro is challenging and Complex KBQA requires further research efforts. 
We also treat KQA Pro as a diagnostic dataset for testing multiple reasoning skills, conduct a thorough evaluation of existing models and discuss further directions for Complex KBQA. Our codes and datasets can be obtained from https://github.com/shijx12/KQAPro_Baselines.", "doc_id": "300626c9fd68ca42e0a331192a490d0a", "publication_year": 2022, "sentences": ["complex question answering over knowledge base ( complex kbqa ) is challenging because it requires various compositional reasoning capabilities , such as multi - hop inference , attribute comparison , set operation , etc .", "existing benchmarks have some shortcomings that limit the development of complex kbqa : 1 ) they only provide qa pairs without explicit reasoning processes ; 2 ) questions are poor in diversity or scale .", "to this end , we introduce kqa pro , a dataset for complex kbqa including around 120k diverse natural language questions .", "we introduce a compositional and interpretable programming language kopl to represent the reasoning process of complex questions .", "for each question , we provide the corresponding kopl program and sparql query , so that kqa pro can serve for both kbqa and semantic parsing tasks .", "experimental results show that state - of - the - art kbqa methods cannot achieve promising results on kqa pro as on current datasets , which suggests that kqa pro is challenging and complex kbqa requires further research efforts .", "we also treat kqa pro as a diagnostic dataset for testing multiple reasoning skills , conduct a thorough evaluation of existing models and discuss further directions for complex kbqa .", "our codes and datasets can be obtained from https : / / github . 
com / shijx12 / kqapro _ baselines ."], "events": [{"event_type": "ITT", "arguments": [{"text": "complex question answering over knowledge base", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["complex", "question", "answering", "over", "knowledge", "base"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [11]}}, {"event_type": "RWF", "arguments": [{"text": "existing benchmarks", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "benchmarks"], "offsets": [35, 36]}, {"text": "shortcomings", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["shortcomings"], "offsets": [39]}, {"text": "limit", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["limit"], "offsets": [41]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [37]}}, {"event_type": "RWF", "arguments": [{"text": "existing benchmarks", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "benchmarks"], "offsets": [35, 36]}, {"text": "without explicit reasoning processes", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["without", "explicit", "reasoning", "processes"], "offsets": [55, 56, 57, 58]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [52]}}, {"event_type": "RWF", "arguments": [{"text": "questions", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["questions"], "offsets": [62]}, {"text": "poor", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["poor"], "offsets": [64]}], "trigger": {"text": "poor", "tokens": ["poor"], "offsets": [64]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [74]}, {"text": "kqa pro", "nugget_type": "DST", "argument_type": "Content", "tokens": ["kqa", "pro"], "offsets": [76, 77]}, {"text": "complex question answering over knowledge base", "nugget_type": "TAK", "argument_type": "Target", "tokens": 
["complex", "question", "answering", "over", "knowledge", "base"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [75]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [92]}, {"text": "represent", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["represent"], "offsets": [102]}, {"text": "compositional and interpretable programming language", "nugget_type": "APP", "argument_type": "Content", "tokens": ["compositional", "and", "interpretable", "programming", "language"], "offsets": [95, 96, 97, 98, 99]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [93]}}, {"event_type": "PUR", "arguments": [{"text": "reasoning process of complex questions", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["reasoning", "process", "of", "complex", "questions"], "offsets": [104, 105, 106, 107, 108]}], "trigger": {"text": "represent", "tokens": ["represent"], "offsets": [102]}}, {"event_type": "FIN", "arguments": [{"text": "cannot achieve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["cannot", "achieve"], "offsets": [151, 152]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [140]}}, {"event_type": "FIN", "arguments": [{"text": "challenging", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["challenging"], "offsets": [169]}, {"text": "requires", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["requires"], "offsets": [173]}], "trigger": {"text": "suggests", "tokens": ["suggests"], "offsets": [164]}}, {"event_type": "FAC", "arguments": [{"text": "kqa pro", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["kqa", "pro"], "offsets": [166, 167]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [169]}}, {"event_type": "FAC", "arguments": [{"text": "complex question answering over knowledge base", "nugget_type": 
"APP", "argument_type": "Subject", "tokens": ["complex", "question", "answering", "over", "knowledge", "base"], "offsets": [0, 1, 2, 3, 4, 5]}, {"text": "further research efforts", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["further", "research", "efforts"], "offsets": [174, 175, 176]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [173]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [178]}, {"text": "testing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["testing"], "offsets": [188]}, {"text": "kqa pro as a diagnostic dataset", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["kqa", "pro", "as", "a", "diagnostic", "dataset"], "offsets": [181, 182, 183, 184, 185, 186]}], "trigger": {"text": "treat", "tokens": ["treat"], "offsets": [180]}}, {"event_type": "PUR", "arguments": [{"text": "multiple reasoning skills", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["multiple", "reasoning", "skills"], "offsets": [189, 190, 191]}], "trigger": {"text": "testing", "tokens": ["testing"], "offsets": [188]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [178]}, {"text": "thorough evaluation of existing models", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["thorough", "evaluation", "of", "existing", "models"], "offsets": [195, 196, 197, 198, 199]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [193]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [178]}, {"text": "complex question answering over knowledge base", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["complex", "question", "answering", "over", "knowledge", "base"], "offsets": [0, 1, 2, 3, 4, 5]}, {"text": "further directions", 
"nugget_type": "FEA", "argument_type": "Content", "tokens": ["further", "directions"], "offsets": [202, 203]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [201]}}, {"event_type": "PUR", "arguments": [{"text": "development of complex kbqa", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["development", "of", "complex", "question", "answering", "over", "knowledge", "base"], "offsets": [43, 44, 0, 1, 2, 3, 4, 5]}], "trigger": {"text": "limit", "tokens": ["limit"], "offsets": [41]}}, {"event_type": "MDS", "arguments": [{"text": "corresponding kopl program", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["corresponding", "kopl", "program"], "offsets": [117, 118, 119]}, {"text": "sparql query", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["sparql", "query"], "offsets": [121, 122]}, {"text": "each question", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "question"], "offsets": [111, 112]}, {"text": "serve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["serve"], "offsets": [129]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [115]}}, {"event_type": "PUR", "arguments": [{"text": "kbqa and semantic parsing tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["question", "answering", "over", "knowledge", "base", "and", "semantic", "parsing", "tasks"], "offsets": [1, 2, 3, 4, 5, 133, 134, 135, 136]}], "trigger": {"text": "serve", "tokens": ["serve"], "offsets": [129]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art kbqa methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["state", "-", "of", "-", "the", "-", "art", "question", "answering", "over", "knowledge", "base", "methods"], "offsets": [142, 143, 144, 145, 146, 147, 148, 1, 2, 3, 4, 5, 150]}, {"text": "promising results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["promising", "results"], "offsets": [153, 154]}, 
{"text": "kqa pro", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["kqa", "pro"], "offsets": [156, 157]}], "trigger": {"text": "cannot achieve", "tokens": ["cannot", "achieve"], "offsets": [151, 152]}}], "document": ["complex", "question", "answering", "over", "knowledge", "base", "(", "complex", "kbqa", ")", "is", "challenging", "because", "it", "requires", "various", "compositional", "reasoning", "capabilities", ",", "such", "as", "multi", "-", "hop", "inference", ",", "attribute", "comparison", ",", "set", "operation", ",", "etc", ".", "existing", "benchmarks", "have", "some", "shortcomings", "that", "limit", "the", "development", "of", "complex", "kbqa", ":", "1", ")", "they", "only", "provide", "qa", "pairs", "without", "explicit", "reasoning", "processes", ";", "2", ")", "questions", "are", "poor", "in", "diversity", "or", "scale", ".", "to", "this", "end", ",", "we", "introduce", "kqa", "pro", ",", "a", "dataset", "for", "complex", "kbqa", "including", "around", "120k", "diverse", "natural", "language", "questions", ".", "we", "introduce", "a", "compositional", "and", "interpretable", "programming", "language", "kopl", "to", "represent", "the", "reasoning", "process", "of", "complex", "questions", ".", "for", "each", "question", ",", "we", "provide", "the", "corresponding", "kopl", "program", "and", "sparql", "query", ",", "so", "that", "kqa", "pro", "can", "serve", "for", "both", "kbqa", "and", "semantic", "parsing", "tasks", ".", "experimental", "results", "show", "that", "state", "-", "of", "-", "the", "-", "art", "kbqa", "methods", "cannot", "achieve", "promising", "results", "on", "kqa", "pro", "as", "on", "current", "datasets", ",", "which", "suggests", "that", "kqa", "pro", "is", "challenging", "and", "complex", "kbqa", "requires", "further", "research", "efforts", ".", "we", "also", "treat", "kqa", "pro", "as", "a", "diagnostic", "dataset", "for", "testing", "multiple", "reasoning", "skills", ",", "conduct", "a", "thorough", 
"evaluation", "of", "existing", "models", "and", "discuss", "further", "directions", "for", "complex", "kbqa", ".", "our", "codes", "and", "datasets", "can", "be", "obtained", "from", "https", ":", "/", "/", "github", ".", "com", "/", "shijx12", "/", "kqapro", "_", "baselines", "."]}, {"venue": "ACL", "title": "Revisiting Low-Resource Neural Machine Translation: A Case Study", "abstract": "It has been shown that the performance of neural machine translation (NMT) drops starkly in low-resource conditions, underperforming phrase-based statistical machine translation (PBSMT) and requiring large amounts of auxiliary data to achieve competitive results. In this paper, we re-assess the validity of these results, arguing that they are the result of lack of system adaptation to low-resource settings. We discuss some pitfalls to be aware of when training low-resource NMT systems, and recent techniques that have shown to be especially helpful in low-resource settings, resulting in a set of best practices for low-resource NMT. In our experiments on German\u2013English with different amounts of IWSLT14 training data, we show that, without the use of any auxiliary monolingual or multilingual data, an optimized NMT system can outperform PBSMT with far less data than previously claimed. 
We also apply these techniques to a low-resource Korean\u2013English dataset, surpassing previously reported results by 4 BLEU.", "doc_id": "fe623f87beffcd0b82a5e8a1c4210d16", "publication_year": 2019, "sentences": ["it has been shown that the performance of neural machine translation ( nmt ) drops starkly in low - resource conditions , underperforming phrase - based statistical machine translation ( pbsmt ) and requiring large amounts of auxiliary data to achieve competitive results .", "in this paper , we re - assess the validity of these results , arguing that they are the result of lack of system adaptation to low - resource settings .", "we discuss some pitfalls to be aware of when training low - resource nmt systems , and recent techniques that have shown to be especially helpful in low - resource settings , resulting in a set of best practices for low - resource nmt .", "in our experiments on german \u2013 english with different amounts of iwslt14 training data , we show that , without the use of any auxiliary monolingual or multilingual data , an optimized nmt system can outperform pbsmt with far less data than previously claimed .", "we also apply these techniques to a low - resource korean \u2013 english dataset , surpassing previously reported results by 4 bleu ."], "events": [{"event_type": "ITT", "arguments": [{"text": "neural machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["neural", "machine", "translation"], "offsets": [8, 9, 10]}], "trigger": {"text": "drops", "tokens": ["drops"], "offsets": [14]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [48]}, {"text": "validity of these results", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["validity", "of", "these", "results"], "offsets": [53, 54, 55, 56]}], "trigger": {"text": "re - assess", "tokens": ["re", "-", "assess"], "offsets": [49, 50, 51]}}, {"event_type": "WKS", 
"arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [75]}, {"text": "some pitfalls", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["some", "pitfalls"], "offsets": [77, 78]}, {"text": "recent techniques", "nugget_type": "APP", "argument_type": "Content", "tokens": ["recent", "techniques"], "offsets": [92, 93]}, {"text": "to be aware of when training low - resource nmt systems", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "be", "aware", "of", "when", "training", "low", "-", "resource", "neural", "machine", "translation", "systems"], "offsets": [79, 80, 81, 82, 83, 84, 85, 86, 87, 8, 9, 10, 89]}, {"text": "that have shown to be especially helpful in low - resource settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["that", "have", "shown", "to", "be", "especially", "helpful", "in", "low", "-", "resource", "settings"], "offsets": [94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105]}, {"text": "resulting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["resulting"], "offsets": [107]}, {"text": "low - resource nmt", "nugget_type": "APP", "argument_type": "Target", "tokens": ["low", "-", "resource", "neural", "machine", "translation"], "offsets": [115, 116, 117, 8, 9, 10]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [76]}}, {"event_type": "PUR", "arguments": [{"text": "set of best practices", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["set", "of", "best", "practices"], "offsets": [110, 111, 112, 113]}], "trigger": {"text": "resulting", "tokens": ["resulting"], "offsets": [107]}}, {"event_type": "CMP", "arguments": [{"text": "on german \u2013 english", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "german", "\u2013", "english"], "offsets": [123, 124, 125, 126]}, {"text": "iwslt14 training data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["iwslt14", "training", 
"data"], "offsets": [131, 132, 133]}, {"text": "without the use of any auxiliary monolingual or multilingual data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "the", "use", "of", "any", "auxiliary", "monolingual", "or", "multilingual", "data"], "offsets": [139, 140, 141, 142, 143, 144, 145, 146, 147, 148]}, {"text": "optimized nmt system", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["optimized", "neural", "machine", "translation", "system"], "offsets": [151, 8, 9, 10, 153]}, {"text": "pbsmt", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["pbsmt"], "offsets": [156]}, {"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [155]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [155]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [165]}, {"text": "these techniques", "nugget_type": "APP", "argument_type": "Content", "tokens": ["these", "techniques"], "offsets": [168, 169]}, {"text": "low - resource korean \u2013 english dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["low", "-", "resource", "korean", "\u2013", "english", "dataset"], "offsets": [172, 173, 174, 175, 176, 177, 178]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [167]}}, {"event_type": "CMP", "arguments": [{"text": "4", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["4"], "offsets": [185]}, {"text": "bleu", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bleu"], "offsets": [186]}, {"text": "previously reported results", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previously", "reported", "results"], "offsets": [181, 182, 183]}], "trigger": {"text": "surpassing", "tokens": ["surpassing"], "offsets": [180]}}, {"event_type": "RWF", "arguments": [{"text": "lack", "nugget_type": "WEA", "argument_type": "Fault", 
"tokens": ["lack"], "offsets": [65]}, {"text": "system adaptation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["system", "adaptation"], "offsets": [67, 68]}], "trigger": {"text": "lack", "tokens": ["lack"], "offsets": [65]}}, {"event_type": "RWF", "arguments": [{"text": "neural machine translation", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["neural", "machine", "translation"], "offsets": [8, 9, 10]}, {"text": "drops", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["drops"], "offsets": [14]}, {"text": "starkly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["starkly"], "offsets": [15]}], "trigger": {"text": "drops", "tokens": ["drops"], "offsets": [14]}}, {"event_type": "RWF", "arguments": [{"text": "underperforming", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["underperforming"], "offsets": [22]}], "trigger": {"text": "underperforming", "tokens": ["underperforming"], "offsets": [22]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [135]}, {"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], "offsets": [155]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [136]}}], "document": ["it", "has", "been", "shown", "that", "the", "performance", "of", "neural", "machine", "translation", "(", "nmt", ")", "drops", "starkly", "in", "low", "-", "resource", "conditions", ",", "underperforming", "phrase", "-", "based", "statistical", "machine", "translation", "(", "pbsmt", ")", "and", "requiring", "large", "amounts", "of", "auxiliary", "data", "to", "achieve", "competitive", "results", ".", "in", "this", "paper", ",", "we", "re", "-", "assess", "the", "validity", "of", "these", "results", ",", "arguing", "that", "they", "are", "the", "result", "of", "lack", "of", "system", "adaptation", "to", "low", "-", "resource", "settings", ".", "we", "discuss", "some", 
"pitfalls", "to", "be", "aware", "of", "when", "training", "low", "-", "resource", "nmt", "systems", ",", "and", "recent", "techniques", "that", "have", "shown", "to", "be", "especially", "helpful", "in", "low", "-", "resource", "settings", ",", "resulting", "in", "a", "set", "of", "best", "practices", "for", "low", "-", "resource", "nmt", ".", "in", "our", "experiments", "on", "german", "\u2013", "english", "with", "different", "amounts", "of", "iwslt14", "training", "data", ",", "we", "show", "that", ",", "without", "the", "use", "of", "any", "auxiliary", "monolingual", "or", "multilingual", "data", ",", "an", "optimized", "nmt", "system", "can", "outperform", "pbsmt", "with", "far", "less", "data", "than", "previously", "claimed", ".", "we", "also", "apply", "these", "techniques", "to", "a", "low", "-", "resource", "korean", "\u2013", "english", "dataset", ",", "surpassing", "previously", "reported", "results", "by", "4", "bleu", "."]}, {"venue": "ACL", "title": "Simplify the Usage of Lexicon in Chinese NER", "abstract": "Recently, many works have tried to augment the performance of Chinese named entity recognition (NER) using word lexicons. As a representative, Lattice-LSTM has achieved new benchmark results on several public Chinese NER datasets. However, Lattice-LSTM has a complex model architecture. This limits its application in many industrial areas where real-time NER responses are needed. In this work, we propose a simple but effective method for incorporating the word lexicon into the character representations. This method avoids designing a complicated sequence modeling architecture, and for any neural NER model, it requires only subtle adjustment of the character representation layer to introduce the lexicon information. Experimental studies on four benchmark Chinese NER datasets show that our method achieves an inference speed up to 6.15 times faster than those of state-of-the-art methods, along with a better performance. 
The experimental results also show that the proposed method can be easily incorporated with pre-trained models like BERT.", "doc_id": "0014a86ada8243a5d6539ece33ac0ac8", "publication_year": 2020, "sentences": ["recently , many works have tried to augment the performance of chinese named entity recognition ( ner ) using word lexicons .", "as a representative , lattice - lstm has achieved new benchmark results on several public chinese ner datasets .", "however , lattice - lstm has a complex model architecture .", "this limits its application in many industrial areas where real - time ner responses are needed .", "in this work , we propose a simple but effective method for incorporating the word lexicon into the character representations .", "this method avoids designing a complicated sequence modeling architecture , and for any neural ner model , it requires only subtle adjustment of the character representation layer to introduce the lexicon information .", "experimental studies on four benchmark chinese ner datasets show that our method achieves an inference speed up to 6 . 
15 times faster than those of state - of - the - art methods , along with a better performance .", "the experimental results also show that the proposed method can be easily incorporated with pre - trained models like bert ."], "events": [{"event_type": "ITT", "arguments": [{"text": "lattice - lstm", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["lattice", "-", "lstm"], "offsets": [26, 27, 28]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [30]}}, {"event_type": "RWF", "arguments": [{"text": "lattice - lstm", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["lattice", "-", "lstm"], "offsets": [43, 44, 45]}, {"text": "complex model architecture", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["complex", "model", "architecture"], "offsets": [48, 49, 50]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [46]}}, {"event_type": "RWF", "arguments": [{"text": "limits", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limits"], "offsets": [53]}, {"text": "application", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["application"], "offsets": [55]}], "trigger": {"text": "limits", "tokens": ["limits"], "offsets": [53]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [73]}, {"text": "simple but effective method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["simple", "but", "effective", "method"], "offsets": [76, 77, 78, 79]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [74]}}, {"event_type": "RWS", "arguments": [{"text": "word lexicon", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["word", "lexicon"], "offsets": [83, 84]}, {"text": "character representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["character", "representations"], "offsets": [87, 88]}], "trigger": {"text": "incorporating", "tokens": 
["incorporating"], "offsets": [81]}}, {"event_type": "WKS", "arguments": [{"text": "complicated sequence modeling architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["complicated", "sequence", "modeling", "architecture"], "offsets": [95, 96, 97, 98]}], "trigger": {"text": "avoids designing", "tokens": ["avoids", "designing"], "offsets": [92, 93]}}, {"event_type": "WKS", "arguments": [{"text": "any neural ner model", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["any", "neural", "ner", "model"], "offsets": [102, 103, 104, 105]}, {"text": "only subtle adjustment of the character representation layer", "nugget_type": "APP", "argument_type": "Content", "tokens": ["only", "subtle", "adjustment", "of", "the", "character", "representation", "layer"], "offsets": [109, 110, 111, 112, 113, 114, 115, 116]}, {"text": "introduce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["introduce"], "offsets": [118]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [108]}}, {"event_type": "PUR", "arguments": [{"text": "lexicon information", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["lexicon", "information"], "offsets": [120, 121]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [118]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [135]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [131]}}, {"event_type": "CMP", "arguments": [{"text": "simple but effective method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["simple", "but", "effective", "method"], "offsets": [76, 77, 78, 79]}, {"text": "faster", "nugget_type": "STR", "argument_type": "Result", "tokens": ["faster"], "offsets": [145]}, {"text": "6 . 
15 times", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["6", ".", "15", "times"], "offsets": [141, 142, 143, 144]}, {"text": "state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [149, 150, 151, 152, 153, 154, 155, 156]}, {"text": "better performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "performance"], "offsets": [161, 162]}, {"text": "inference speed", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["inference", "speed"], "offsets": [137, 138]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [135]}}, {"event_type": "FIN", "arguments": [{"text": "incorporated", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["incorporated"], "offsets": [176]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [168]}}, {"event_type": "FAC", "arguments": [{"text": "simple but effective method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["simple", "but", "effective", "method"], "offsets": [76, 77, 78, 79]}, {"text": "easily", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["easily"], "offsets": [175]}, {"text": "pre - trained models", "nugget_type": "APP", "argument_type": "Object", "tokens": ["pre", "-", "trained", "models"], "offsets": [178, 179, 180, 181]}], "trigger": {"text": "incorporated", "tokens": ["incorporated"], "offsets": [176]}}], "document": ["recently", ",", "many", "works", "have", "tried", "to", "augment", "the", "performance", "of", "chinese", "named", "entity", "recognition", "(", "ner", ")", "using", "word", "lexicons", ".", "as", "a", "representative", ",", "lattice", "-", "lstm", "has", "achieved", "new", "benchmark", "results", "on", "several", "public", "chinese", "ner", "datasets", ".", "however", ",", "lattice", "-", "lstm", "has", "a", "complex", "model", "architecture", ".", "this", "limits", "its", "application", 
"in", "many", "industrial", "areas", "where", "real", "-", "time", "ner", "responses", "are", "needed", ".", "in", "this", "work", ",", "we", "propose", "a", "simple", "but", "effective", "method", "for", "incorporating", "the", "word", "lexicon", "into", "the", "character", "representations", ".", "this", "method", "avoids", "designing", "a", "complicated", "sequence", "modeling", "architecture", ",", "and", "for", "any", "neural", "ner", "model", ",", "it", "requires", "only", "subtle", "adjustment", "of", "the", "character", "representation", "layer", "to", "introduce", "the", "lexicon", "information", ".", "experimental", "studies", "on", "four", "benchmark", "chinese", "ner", "datasets", "show", "that", "our", "method", "achieves", "an", "inference", "speed", "up", "to", "6", ".", "15", "times", "faster", "than", "those", "of", "state", "-", "of", "-", "the", "-", "art", "methods", ",", "along", "with", "a", "better", "performance", ".", "the", "experimental", "results", "also", "show", "that", "the", "proposed", "method", "can", "be", "easily", "incorporated", "with", "pre", "-", "trained", "models", "like", "bert", "."]}, {"venue": "ACL", "title": "A Corpus for Reasoning about Natural Language Grounded in Photographs", "abstract": "We introduce a new dataset for joint reasoning about natural language and images, with a focus on semantic diversity, compositionality, and visual reasoning challenges. The data contains 107,292 examples of English sentences paired with web photographs. The task is to determine whether a natural language caption is true about a pair of photographs. We crowdsource the data using sets of visually rich images and a compare-and-contrast task to elicit linguistically diverse language. Qualitative analysis shows the data requires compositional joint reasoning, including about quantities, comparisons, and relations. 
Evaluation using state-of-the-art visual reasoning methods shows the data presents a strong challenge.", "doc_id": "d9595027b0bc9bfe42945ee76fb0467e", "publication_year": 2019, "sentences": ["we introduce a new dataset for joint reasoning about natural language and images , with a focus on semantic diversity , compositionality , and visual reasoning challenges .", "the data contains 107 , 292 examples of english sentences paired with web photographs .", "the task is to determine whether a natural language caption is true about a pair of photographs .", "we crowdsource the data using sets of visually rich images and a compare - and - contrast task to elicit linguistically diverse language .", "qualitative analysis shows the data requires compositional joint reasoning , including about quantities , comparisons , and relations .", "evaluation using state - of - the - art visual reasoning methods shows the data presents a strong challenge ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset"], "offsets": [4]}, {"text": "joint reasoning", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["joint", "reasoning"], "offsets": [6, 7]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "natural language", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["natural", "language"], "offsets": [9, 10]}, {"text": "images", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["images"], "offsets": [12]}], "trigger": {"text": "joint reasoning", "tokens": ["joint", "reasoning"], "offsets": [6, 7]}}, {"event_type": "FIN", "arguments": [{"text": "requires", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["requires"], "offsets": [90]}], "trigger": {"text": "shows", "tokens": ["shows"], 
"offsets": [87]}}, {"event_type": "FAC", "arguments": [{"text": "data", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["data"], "offsets": [89]}, {"text": "compositional joint reasoning", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["compositional", "joint", "reasoning"], "offsets": [91, 92, 93]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [90]}}, {"event_type": "FIN", "arguments": [{"text": "presents", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["presents"], "offsets": [119]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [116]}}, {"event_type": "FAC", "arguments": [{"text": "data", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["data"], "offsets": [118]}, {"text": "strong challenge", "nugget_type": "STR", "argument_type": "Object", "tokens": ["strong", "challenge"], "offsets": [121, 122]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [119]}}, {"event_type": "MDS", "arguments": [{"text": "compare - and - contrast task", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["compare", "-", "and", "-", "contrast", "task"], "offsets": [73, 74, 75, 76, 77, 78]}, {"text": "sets of visually rich images", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["sets", "of", "visually", "rich", "images"], "offsets": [66, 67, 68, 69, 70]}, {"text": "data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["data"], "offsets": [64]}, {"text": "elicit", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["elicit"], "offsets": [80]}], "trigger": {"text": "crowdsource", "tokens": ["crowdsource"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "linguistically diverse language", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["linguistically", "diverse", "language"], "offsets": [81, 82, 83]}], "trigger": {"text": "elicit", "tokens": ["elicit"], "offsets": [80]}}, {"event_type": 
"WKS", "arguments": [{"text": "state - of - the - art visual reasoning methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["state", "-", "of", "-", "the", "-", "art", "visual", "reasoning", "methods"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113, 114, 115]}, {"text": "evaluation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["evaluation"], "offsets": [104]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [105]}}], "document": ["we", "introduce", "a", "new", "dataset", "for", "joint", "reasoning", "about", "natural", "language", "and", "images", ",", "with", "a", "focus", "on", "semantic", "diversity", ",", "compositionality", ",", "and", "visual", "reasoning", "challenges", ".", "the", "data", "contains", "107", ",", "292", "examples", "of", "english", "sentences", "paired", "with", "web", "photographs", ".", "the", "task", "is", "to", "determine", "whether", "a", "natural", "language", "caption", "is", "true", "about", "a", "pair", "of", "photographs", ".", "we", "crowdsource", "the", "data", "using", "sets", "of", "visually", "rich", "images", "and", "a", "compare", "-", "and", "-", "contrast", "task", "to", "elicit", "linguistically", "diverse", "language", ".", "qualitative", "analysis", "shows", "the", "data", "requires", "compositional", "joint", "reasoning", ",", "including", "about", "quantities", ",", "comparisons", ",", "and", "relations", ".", "evaluation", "using", "state", "-", "of", "-", "the", "-", "art", "visual", "reasoning", "methods", "shows", "the", "data", "presents", "a", "strong", "challenge", "."]}, {"venue": "ACL", "title": "Better OOV Translation with Bilingual Terminology Mining", "abstract": "Unseen words, also called out-of-vocabulary words (OOVs), are difficult for machine translation. In neural machine translation, byte-pair encoding can be used to represent OOVs, but they are still often incorrectly translated. 
We improve the translation of OOVs in NMT using easy-to-obtain monolingual data. We look for OOVs in the text to be translated and translate them using simple-to-construct bilingual word embeddings (BWEs). In our MT experiments we take the 5-best candidates, which is motivated by intrinsic mining experiments. Using all five of the proposed target language words as queries we mine target-language sentences. We then back-translate, forcing the back-translation of each of the five proposed target-language OOV-translation-candidates to be the original source-language OOV. We show that by using this synthetic data to fine-tune our system the translation of OOVs can be dramatically improved. In our experiments we use a system trained on Europarl and mine sentences containing medical terms from monolingual data.", "doc_id": "039f95b251b651d1e2ee4caad15f4012", "publication_year": 2019, "sentences": ["unseen words , also called out - of - vocabulary words ( oovs ) , are difficult for machine translation .", "in neural machine translation , byte - pair encoding can be used to represent oovs , but they are still often incorrectly translated .", "we improve the translation of oovs in nmt using easy - to - obtain monolingual data .", "we look for oovs in the text to be translated and translate them using simple - to - construct bilingual word embeddings ( bwes ) .", "in our mt experiments we take the 5 - best candidates , which is motivated by intrinsic mining experiments .", "using all five of the proposed target language words as queries we mine target - language sentences .", "we then back - translate , forcing the back - translation of each of the five proposed target - language oov - translation - candidates to be the original source - language oov .", "we show that by using this synthetic data to fine - tune our system the translation of oovs can be dramatically improved .", "in our experiments we use a system trained on europarl and mine sentences containing medical 
terms from monolingual data ."], "events": [{"event_type": "ITT", "arguments": [{"text": "out - of - vocabulary words", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["out", "-", "of", "-", "vocabulary", "words"], "offsets": [5, 6, 7, 8, 9, 10]}], "trigger": {"text": "difficult", "tokens": ["difficult"], "offsets": [16]}}, {"event_type": "RWF", "arguments": [{"text": "often", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["often"], "offsets": [41]}, {"text": "out - of - vocabulary words", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["out", "-", "of", "-", "vocabulary", "words"], "offsets": [5, 6, 7, 8, 9, 10]}], "trigger": {"text": "incorrectly translated", "tokens": ["incorrectly", "translated"], "offsets": [42, 43]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [45]}, {"text": "translation of oovs in nmt", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["translation", "of", "out", "-", "of", "-", "vocabulary", "words", "in", "nmt"], "offsets": [48, 49, 5, 6, 7, 8, 9, 10, 51, 52]}, {"text": "using easy - to - obtain monolingual data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "easy", "-", "to", "-", "obtain", "monolingual", "data"], "offsets": [53, 54, 55, 56, 57, 58, 59, 60]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [46]}}, {"event_type": "MDS", "arguments": [{"text": "in the text to be translated", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "text", "to", "be", "translated"], "offsets": [66, 67, 68, 69, 70, 71]}, {"text": "out - of - vocabulary words", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["out", "-", "of", "-", "vocabulary", "words"], "offsets": [5, 6, 7, 8, 9, 10]}, {"text": "simple - to - construct bilingual word embeddings", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["simple", 
"-", "to", "-", "construct", "bilingual", "word", "embeddings"], "offsets": [76, 77, 78, 79, 80, 81, 82, 83]}], "trigger": {"text": "translate", "tokens": ["translate"], "offsets": [73]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [92]}, {"text": "5 - best candidates", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["5", "-", "best", "candidates"], "offsets": [95, 96, 97, 98]}], "trigger": {"text": "take", "tokens": ["take"], "offsets": [93]}}, {"event_type": "MDS", "arguments": [{"text": "target - language sentences", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["target", "-", "language", "sentences"], "offsets": [121, 122, 123, 124]}, {"text": "all five of the proposed target language words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["all", "five", "of", "the", "proposed", "target", "language", "words"], "offsets": [109, 110, 111, 112, 113, 114, 115, 116]}], "trigger": {"text": "mine", "tokens": ["mine"], "offsets": [120]}}, {"event_type": "MDS", "arguments": [{"text": "back - translation", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["back", "-", "translation"], "offsets": [134, 135, 136]}, {"text": "original source - language oov", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["original", "source", "-", "language", "out", "-", "of", "-", "vocabulary", "words"], "offsets": [154, 155, 156, 157, 5, 6, 7, 8, 9, 10]}], "trigger": {"text": "forcing", "tokens": ["forcing"], "offsets": [132]}}, {"event_type": "FAC", "arguments": [{"text": "translation of oovs", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["translation", "of", "oovs"], "offsets": [175, 176, 177]}, {"text": "dramatically", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["dramatically"], "offsets": [180]}, {"text": "using this synthetic data to fine - tune our system", "nugget_type": 
"LIM", "argument_type": "Condition", "tokens": ["using", "this", "synthetic", "data", "to", "fine", "-", "tune", "our", "system"], "offsets": [164, 165, 166, 167, 168, 169, 170, 171, 172, 173]}], "trigger": {"text": "improved", "tokens": ["improved"], "offsets": [181]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [160]}, {"text": "improved", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improved"], "offsets": [181]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [161]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [186]}, {"text": "system", "nugget_type": "APP", "argument_type": "Content", "tokens": ["system"], "offsets": [189]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [187]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [186]}, {"text": "sentences", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["sentences"], "offsets": [195]}], "trigger": {"text": "mine", "tokens": ["mine"], "offsets": [194]}}], "document": ["unseen", "words", ",", "also", "called", "out", "-", "of", "-", "vocabulary", "words", "(", "oovs", ")", ",", "are", "difficult", "for", "machine", "translation", ".", "in", "neural", "machine", "translation", ",", "byte", "-", "pair", "encoding", "can", "be", "used", "to", "represent", "oovs", ",", "but", "they", "are", "still", "often", "incorrectly", "translated", ".", "we", "improve", "the", "translation", "of", "oovs", "in", "nmt", "using", "easy", "-", "to", "-", "obtain", "monolingual", "data", ".", "we", "look", "for", "oovs", "in", "the", "text", "to", "be", "translated", "and", "translate", "them", "using", "simple", "-", "to", "-", "construct", "bilingual", "word", "embeddings", "(", "bwes", ")", ".", "in", "our", "mt", 
"experiments", "we", "take", "the", "5", "-", "best", "candidates", ",", "which", "is", "motivated", "by", "intrinsic", "mining", "experiments", ".", "using", "all", "five", "of", "the", "proposed", "target", "language", "words", "as", "queries", "we", "mine", "target", "-", "language", "sentences", ".", "we", "then", "back", "-", "translate", ",", "forcing", "the", "back", "-", "translation", "of", "each", "of", "the", "five", "proposed", "target", "-", "language", "oov", "-", "translation", "-", "candidates", "to", "be", "the", "original", "source", "-", "language", "oov", ".", "we", "show", "that", "by", "using", "this", "synthetic", "data", "to", "fine", "-", "tune", "our", "system", "the", "translation", "of", "oovs", "can", "be", "dramatically", "improved", ".", "in", "our", "experiments", "we", "use", "a", "system", "trained", "on", "europarl", "and", "mine", "sentences", "containing", "medical", "terms", "from", "monolingual", "data", "."]}, {"venue": "ACL", "title": "Social Bias Frames: Reasoning about Social and Power Implications of Language", "abstract": "Warning: this paper contains content that may be offensive or upsetting. Language has the power to reinforce stereotypes and project social biases onto others. At the core of the challenge is that it is rarely what is stated explicitly, but rather the implied meanings, that frame people\u2019s judgments about others. For example, given a statement that \u201cwe shouldn\u2019t lower our standards to hire more women,\u201d most listeners will infer the implicature intended by the speaker - that \u201cwomen (candidates) are less qualified.\u201d Most semantic formalisms, to date, do not capture such pragmatic implications in which people express social biases and power differentials in language. We introduce Social Bias Frames, a new conceptual formalism that aims to model the pragmatic frames in which people project social biases and stereotypes onto others. 
In addition, we introduce the Social Bias Inference Corpus to support large-scale modelling and evaluation with 150k structured annotations of social media posts, covering over 34k implications about a thousand demographic groups. We then establish baseline approaches that learn to recover Social Bias Frames from unstructured text. We find that while state-of-the-art neural models are effective at high-level categorization of whether a given statement projects unwanted social bias (80% F1), they are not effective at spelling out more detailed explanations in terms of Social Bias Frames. Our study motivates future work that combines structured pragmatic inference with commonsense reasoning on social implications.", "doc_id": "bf5b65ac46a09c8736400f3fefa90526", "publication_year": 2020, "sentences": ["warning : this paper contains content that may be offensive or upsetting .", "language has the power to reinforce stereotypes and project social biases onto others .", "at the core of the challenge is that it is rarely what is stated explicitly , but rather the implied meanings , that frame people \u2019 s judgments about others .", "for example , given a statement that \u201c we shouldn \u2019 t lower our standards to hire more women , \u201d most listeners will infer the implicature intended by the speaker - that \u201c women ( candidates ) are less qualified . 
\u201d", "most semantic formalisms , to date , do not capture such pragmatic implications in which people express social biases and power differentials in language .", "we introduce social bias frames , a new conceptual formalism that aims to model the pragmatic frames in which people project social biases and stereotypes onto others .", "in addition , we introduce the social bias inference corpus to support large - scale modelling and evaluation with 150k structured annotations of social media posts , covering over 34k implications about a thousand demographic groups .", "we then establish baseline approaches that learn to recover social bias frames from unstructured text .", "we find that while state - of - the - art neural models are effective at high - level categorization of whether a given statement projects unwanted social bias ( 80 % f1 ) , they are not effective at spelling out more detailed explanations in terms of social bias frames .", "our study motivates future work that combines structured pragmatic inference with commonsense reasoning on social implications ."], "events": [{"event_type": "ITT", "arguments": [{"text": "social biases", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["social", "biases"], "offsets": [22, 23]}], "trigger": {"text": "project", "tokens": ["project"], "offsets": [21]}}, {"event_type": "RWF", "arguments": [{"text": "rarely what is stated explicitly", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["rarely", "what", "is", "stated", "explicitly"], "offsets": [37, 38, 39, 40, 41]}], "trigger": {"text": "rarely what is stated explicitly", "tokens": ["rarely", "what", "is", "stated", "explicitly"], "offsets": [37, 38, 39, 40, 41]}}, {"event_type": "RWF", "arguments": [{"text": "most semantic formalisms", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["most", "semantic", "formalisms"], "offsets": [101, 102, 103]}, {"text": "not capture", "nugget_type": "WEA", "argument_type": "Fault", "tokens": 
["not", "capture"], "offsets": [109, 110]}], "trigger": {"text": "not capture", "tokens": ["not", "capture"], "offsets": [109, 110]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [126]}, {"text": "social bias frames", "nugget_type": "APP", "argument_type": "Content", "tokens": ["social", "bias", "frames"], "offsets": [128, 129, 130]}, {"text": "model", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["model"], "offsets": [139]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [127]}}, {"event_type": "PUR", "arguments": [{"text": "pragmatic frames in which people project social biases and stereotypes onto others", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["pragmatic", "frames", "in", "which", "people", "project", "social", "biases", "and", "stereotypes", "onto", "others"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [139]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [157]}, {"text": "social bias inference corpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["social", "bias", "inference", "corpus"], "offsets": [160, 161, 162, 163]}, {"text": "support", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["support"], "offsets": [165]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [158]}}, {"event_type": "PUR", "arguments": [{"text": "large - scale modelling", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["large", "-", "scale", "modelling"], "offsets": [166, 167, 168, 169]}, {"text": "evaluation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["evaluation"], "offsets": [171]}], "trigger": {"text": "support", "tokens": ["support"], "offsets": [165]}}, {"event_type": "PRP", "arguments": 
[{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [191]}, {"text": "baseline approaches", "nugget_type": "APP", "argument_type": "Content", "tokens": ["baseline", "approaches"], "offsets": [194, 195]}, {"text": "learn to recover", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn", "to", "recover"], "offsets": [197, 198, 199]}], "trigger": {"text": "establish", "tokens": ["establish"], "offsets": [193]}}, {"event_type": "PUR", "arguments": [{"text": "social bias frames", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["social", "bias", "frames"], "offsets": [200, 201, 202]}, {"text": "from unstructured text", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "unstructured", "text"], "offsets": [203, 204, 205]}], "trigger": {"text": "learn to recover", "tokens": ["learn", "to", "recover"], "offsets": [197, 198, 199]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [207]}, {"text": "effective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effective"], "offsets": [221]}, {"text": "not effective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["not", "effective"], "offsets": [244, 245]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [208]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art neural models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["state", "-", "of", "-", "the", "-", "art", "neural", "models"], "offsets": [211, 212, 213, 214, 215, 216, 217, 218, 219]}, {"text": "at high - level categorization of whether a given statement projects unwanted social bias", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "high", "-", "level", "categorization", "of", "whether", "a", "given", "statement", "projects", "unwanted", "social", "bias"], "offsets": [222, 223, 224, 225, 226, 227, 228, 
229, 230, 231, 232, 233, 234, 235]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [221]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art neural models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["state", "-", "of", "-", "the", "-", "art", "neural", "models"], "offsets": [211, 212, 213, 214, 215, 216, 217, 218, 219]}, {"text": "at spelling out more detailed explanations in terms of social bias frames", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "spelling", "out", "more", "detailed", "explanations", "in", "terms", "of", "social", "bias", "frames"], "offsets": [246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257]}], "trigger": {"text": "not effective", "tokens": ["not", "effective"], "offsets": [244, 245]}}], "document": ["warning", ":", "this", "paper", "contains", "content", "that", "may", "be", "offensive", "or", "upsetting", ".", "language", "has", "the", "power", "to", "reinforce", "stereotypes", "and", "project", "social", "biases", "onto", "others", ".", "at", "the", "core", "of", "the", "challenge", "is", "that", "it", "is", "rarely", "what", "is", "stated", "explicitly", ",", "but", "rather", "the", "implied", "meanings", ",", "that", "frame", "people", "\u2019", "s", "judgments", "about", "others", ".", "for", "example", ",", "given", "a", "statement", "that", "\u201c", "we", "shouldn", "\u2019", "t", "lower", "our", "standards", "to", "hire", "more", "women", ",", "\u201d", "most", "listeners", "will", "infer", "the", "implicature", "intended", "by", "the", "speaker", "-", "that", "\u201c", "women", "(", "candidates", ")", "are", "less", "qualified", ".", "\u201d", "most", "semantic", "formalisms", ",", "to", "date", ",", "do", "not", "capture", "such", "pragmatic", "implications", "in", "which", "people", "express", "social", "biases", "and", "power", "differentials", "in", "language", ".", "we", "introduce", "social", "bias", "frames", ",", "a", "new", 
"conceptual", "formalism", "that", "aims", "to", "model", "the", "pragmatic", "frames", "in", "which", "people", "project", "social", "biases", "and", "stereotypes", "onto", "others", ".", "in", "addition", ",", "we", "introduce", "the", "social", "bias", "inference", "corpus", "to", "support", "large", "-", "scale", "modelling", "and", "evaluation", "with", "150k", "structured", "annotations", "of", "social", "media", "posts", ",", "covering", "over", "34k", "implications", "about", "a", "thousand", "demographic", "groups", ".", "we", "then", "establish", "baseline", "approaches", "that", "learn", "to", "recover", "social", "bias", "frames", "from", "unstructured", "text", ".", "we", "find", "that", "while", "state", "-", "of", "-", "the", "-", "art", "neural", "models", "are", "effective", "at", "high", "-", "level", "categorization", "of", "whether", "a", "given", "statement", "projects", "unwanted", "social", "bias", "(", "80", "%", "f1", ")", ",", "they", "are", "not", "effective", "at", "spelling", "out", "more", "detailed", "explanations", "in", "terms", "of", "social", "bias", "frames", ".", "our", "study", "motivates", "future", "work", "that", "combines", "structured", "pragmatic", "inference", "with", "commonsense", "reasoning", "on", "social", "implications", "."]}, {"venue": "ACL", "title": "SUMBT: Slot-Utterance Matching for Universal and Scalable Belief Tracking", "abstract": "In goal-oriented dialog systems, belief trackers estimate the probability distribution of slot-values at every dialog turn. Previous neural approaches have modeled domain- and slot-dependent belief trackers, and have difficulty in adding new slot-values, resulting in lack of flexibility of domain ontology configurations. In this paper, we propose a new approach to universal and scalable belief tracker, called slot-utterance matching belief tracker (SUMBT). 
The model learns the relations between domain-slot-types and slot-values appearing in utterances through attention mechanisms based on contextual semantic vectors. Furthermore, the model predicts slot-value labels in a non-parametric way. From our experiments on two dialog corpora, WOZ 2.0 and MultiWOZ, the proposed model showed performance improvement in comparison with slot-dependent methods and achieved the state-of-the-art joint accuracy.", "doc_id": "fdc09e8f65cde3629a26fe720f267593", "publication_year": 2019, "sentences": ["in goal - oriented dialog systems , belief trackers estimate the probability distribution of slot - values at every dialog turn .", "previous neural approaches have modeled domain - and slot - dependent belief trackers , and have difficulty in adding new slot - values , resulting in lack of flexibility of domain ontology configurations .", "in this paper , we propose a new approach to universal and scalable belief tracker , called slot - utterance matching belief tracker ( sumbt ) .", "the model learns the relations between domain - slot - types and slot - values appearing in utterances through attention mechanisms based on contextual semantic vectors .", "furthermore , the model predicts slot - value labels in a non - parametric way .", "from our experiments on two dialog corpora , woz 2 . 
0 and multiwoz , the proposed model showed performance improvement in comparison with slot - dependent methods and achieved the state - of - the - art joint accuracy ."], "events": [{"event_type": "ITT", "arguments": [{"text": "belief trackers", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["belief", "trackers"], "offsets": [7, 8]}], "trigger": {"text": "estimate", "tokens": ["estimate"], "offsets": [9]}}, {"event_type": "RWS", "arguments": [{"text": "previous neural approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "neural", "approaches"], "offsets": [22, 23, 24]}, {"text": "domain - dependent belief trackers", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["domain", "-", "dependent", "belief", "trackers"], "offsets": [27, 28, 32, 33, 34]}, {"text": "slot - dependent belief trackers", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["slot", "-", "dependent", "belief", "trackers"], "offsets": [30, 31, 32, 33, 34]}], "trigger": {"text": "modeled", "tokens": ["modeled"], "offsets": [26]}}, {"event_type": "RWF", "arguments": [{"text": "difficulty", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["difficulty"], "offsets": [38]}, {"text": "adding new slot - values", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["adding", "new", "slot", "-", "values"], "offsets": [40, 41, 42, 43, 44]}, {"text": "domain - dependent belief trackers", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["domain", "-", "dependent", "belief", "trackers"], "offsets": [27, 28, 32, 33, 34]}, {"text": "slot - dependent belief trackers", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["slot", "-", "dependent", "belief", "trackers"], "offsets": [30, 31, 32, 33, 34]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [37]}}, {"event_type": "RWF", "arguments": [{"text": "lack of flexibility", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack", 
"of", "flexibility"], "offsets": [48, 49, 50]}, {"text": "domain ontology configurations", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["domain", "ontology", "configurations"], "offsets": [52, 53, 54]}, {"text": "domain - dependent belief trackers", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["domain", "-", "dependent", "belief", "trackers"], "offsets": [27, 28, 32, 33, 34]}], "trigger": {"text": "lack of flexibility", "tokens": ["lack", "of", "flexibility"], "offsets": [48, 49, 50]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [60]}, {"text": "slot - utterance matching belief tracker", "nugget_type": "APP", "argument_type": "Content", "tokens": ["slot", "-", "utterance", "matching", "belief", "tracker"], "offsets": [73, 74, 75, 76, 77, 78]}, {"text": "universal and scalable belief tracker", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["universal", "and", "scalable", "belief", "tracker"], "offsets": [66, 67, 68, 69, 70]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [61]}}, {"event_type": "MDS", "arguments": [{"text": "relations between domain - slot - types and slot - values", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["relations", "between", "domain", "-", "slot", "-", "types", "and", "slot", "-", "values"], "offsets": [87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97]}, {"text": "contextual semantic vectors", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["contextual", "semantic", "vectors"], "offsets": [106, 107, 108]}, {"text": "appearing in utterances", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["appearing", "in", "utterances"], "offsets": [98, 99, 100]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [85]}}, {"event_type": "MDS", "arguments": [{"text": "slot - value labels", "nugget_type": "FEA", "argument_type": "TriedComponent", 
"tokens": ["slot", "-", "value", "labels"], "offsets": [115, 116, 117, 118]}, {"text": "in a non - parametric way", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "non", "-", "parametric", "way"], "offsets": [119, 120, 121, 122, 123, 124]}, {"text": "model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["model"], "offsets": [113]}], "trigger": {"text": "predicts", "tokens": ["predicts"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "performance improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["performance", "improvement"], "offsets": [145, 146]}, {"text": "slot - dependent methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["slot", "-", "dependent", "methods"], "offsets": [150, 151, 152, 153]}, {"text": "proposed model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["proposed", "model"], "offsets": [142, 143]}, {"text": "woz 2 . 0", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["woz", "2", ".", "0"], "offsets": [134, 135, 136, 137]}, {"text": "multiwoz", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multiwoz"], "offsets": [139]}], "trigger": {"text": "showed", "tokens": ["showed"], "offsets": [144]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art joint accuracy", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "joint", "accuracy"], "offsets": [157, 158, 159, 160, 161, 162, 163, 164, 165]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [155]}}], "document": ["in", "goal", "-", "oriented", "dialog", "systems", ",", "belief", "trackers", "estimate", "the", "probability", "distribution", "of", "slot", "-", "values", "at", "every", "dialog", "turn", ".", "previous", "neural", "approaches", "have", "modeled", "domain", "-", "and", "slot", "-", "dependent", "belief", "trackers", ",", "and", "have", "difficulty", "in", 
"adding", "new", "slot", "-", "values", ",", "resulting", "in", "lack", "of", "flexibility", "of", "domain", "ontology", "configurations", ".", "in", "this", "paper", ",", "we", "propose", "a", "new", "approach", "to", "universal", "and", "scalable", "belief", "tracker", ",", "called", "slot", "-", "utterance", "matching", "belief", "tracker", "(", "sumbt", ")", ".", "the", "model", "learns", "the", "relations", "between", "domain", "-", "slot", "-", "types", "and", "slot", "-", "values", "appearing", "in", "utterances", "through", "attention", "mechanisms", "based", "on", "contextual", "semantic", "vectors", ".", "furthermore", ",", "the", "model", "predicts", "slot", "-", "value", "labels", "in", "a", "non", "-", "parametric", "way", ".", "from", "our", "experiments", "on", "two", "dialog", "corpora", ",", "woz", "2", ".", "0", "and", "multiwoz", ",", "the", "proposed", "model", "showed", "performance", "improvement", "in", "comparison", "with", "slot", "-", "dependent", "methods", "and", "achieved", "the", "state", "-", "of", "-", "the", "-", "art", "joint", "accuracy", "."]}, {"venue": "ACL", "title": "Guiding Teacher Forcing with Seer Forcing for Neural Machine Translation", "abstract": "Although teacher forcing has become the main training paradigm for neural machine translation, it usually makes predictions only conditioned on past information, and hence lacks global planning for the future. To address this problem, we introduce another decoder, called seer decoder, into the encoder-decoder framework during training, which involves future information in target predictions. Meanwhile, we force the conventional decoder to simulate the behaviors of the seer decoder via knowledge distillation. In this way, at test the conventional decoder can perform like the seer decoder without the attendance of it. 
Experiment results on the Chinese-English, English-German and English-Romanian translation tasks show our method can outperform competitive baselines significantly and achieves greater improvements on the bigger data sets. Besides, the experiments also prove knowledge distillation the best way to transfer knowledge from the seer decoder to the conventional decoder compared to adversarial learning and L2 regularization.", "doc_id": "d7a5cf63d07e76dad98c526d4a044d91", "publication_year": 2021, "sentences": ["although teacher forcing has become the main training paradigm for neural machine translation , it usually makes predictions only conditioned on past information , and hence lacks global planning for the future .", "to address this problem , we introduce another decoder , called seer decoder , into the encoder - decoder framework during training , which involves future information in target predictions .", "meanwhile , we force the conventional decoder to simulate the behaviors of the seer decoder via knowledge distillation .", "in this way , at test the conventional decoder can perform like the seer decoder without the attendance of it .", "experiment results on the chinese - english , english - german and english - romanian translation tasks show our method can outperform competitive baselines significantly and achieves greater improvements on the bigger data sets .", "besides , the experiments also prove knowledge distillation the best way to transfer knowledge from the seer decoder to the conventional decoder compared to adversarial learning and l2 regularization ."], "events": [{"event_type": "ITT", "arguments": [{"text": "teacher forcing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["teacher", "forcing"], "offsets": [1, 2]}], "trigger": {"text": "become", "tokens": ["become"], "offsets": [4]}}, {"event_type": "MDS", "arguments": [{"text": "seer decoder", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["seer", "decoder"], 
"offsets": [44, 45]}, {"text": "encoder - decoder framework", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["encoder", "-", "decoder", "framework"], "offsets": [49, 50, 51, 52]}, {"text": "during training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "training"], "offsets": [53, 54]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [39]}}, {"event_type": "MDS", "arguments": [{"text": "conventional decoder", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["conventional", "decoder"], "offsets": [69, 70]}, {"text": "behaviors of the seer decoder", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["behaviors", "of", "the", "seer", "decoder"], "offsets": [74, 75, 76, 77, 78]}, {"text": "via knowledge distillation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "knowledge", "distillation"], "offsets": [79, 80, 81]}], "trigger": {"text": "simulate", "tokens": ["simulate"], "offsets": [72]}}, {"event_type": "WKS", "arguments": [{"text": "seer decoder", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["seer", "decoder"], "offsets": [96, 97]}, {"text": "without the attendance of it", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "the", "attendance", "of", "seer", "decoder"], "offsets": [98, 99, 100, 101, 96, 97]}], "trigger": {"text": "perform like", "tokens": ["perform", "like"], "offsets": [93, 94]}}, {"event_type": "CMP", "arguments": [{"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [125]}, {"text": "competitive baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["competitive", "baselines"], "offsets": [126, 127]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [128]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [125]}}, 
{"event_type": "CMP", "arguments": [{"text": "seer decoder", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["seer", "decoder"], "offsets": [44, 45]}, {"text": "greater improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["greater", "improvements"], "offsets": [131, 132]}, {"text": "bigger data sets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["bigger", "data", "sets"], "offsets": [135, 136, 137]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [130]}}, {"event_type": "CMP", "arguments": [{"text": "knowledge distillation", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["knowledge", "distillation"], "offsets": [145, 146]}, {"text": "best way", "nugget_type": "STR", "argument_type": "Result", "tokens": ["best", "way"], "offsets": [148, 149]}, {"text": "knowledge", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["knowledge"], "offsets": [152]}, {"text": "from the seer decoder to the conventional decoder", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "seer", "decoder", "to", "the", "conventional", "decoder"], "offsets": [153, 154, 155, 156, 157, 158, 159, 160]}, {"text": "adversarial learning", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["adversarial", "learning"], "offsets": [163, 164]}], "trigger": {"text": "transfer", "tokens": ["transfer"], "offsets": [151]}}, {"event_type": "CMP", "arguments": [{"text": "knowledge distillation", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["knowledge", "distillation"], "offsets": [145, 146]}, {"text": "best way", "nugget_type": "STR", "argument_type": "Result", "tokens": ["best", "way"], "offsets": [148, 149]}, {"text": "knowledge", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["knowledge"], "offsets": [152]}, {"text": "from the seer decoder to the conventional decoder", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "seer", 
"decoder", "to", "the", "conventional", "decoder"], "offsets": [153, 154, 155, 156, 157, 158, 159, 160]}, {"text": "l2 regularization", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["l2", "regularization"], "offsets": [166, 167]}], "trigger": {"text": "transfer", "tokens": ["transfer"], "offsets": [151]}}], "document": ["although", "teacher", "forcing", "has", "become", "the", "main", "training", "paradigm", "for", "neural", "machine", "translation", ",", "it", "usually", "makes", "predictions", "only", "conditioned", "on", "past", "information", ",", "and", "hence", "lacks", "global", "planning", "for", "the", "future", ".", "to", "address", "this", "problem", ",", "we", "introduce", "another", "decoder", ",", "called", "seer", "decoder", ",", "into", "the", "encoder", "-", "decoder", "framework", "during", "training", ",", "which", "involves", "future", "information", "in", "target", "predictions", ".", "meanwhile", ",", "we", "force", "the", "conventional", "decoder", "to", "simulate", "the", "behaviors", "of", "the", "seer", "decoder", "via", "knowledge", "distillation", ".", "in", "this", "way", ",", "at", "test", "the", "conventional", "decoder", "can", "perform", "like", "the", "seer", "decoder", "without", "the", "attendance", "of", "it", ".", "experiment", "results", "on", "the", "chinese", "-", "english", ",", "english", "-", "german", "and", "english", "-", "romanian", "translation", "tasks", "show", "our", "method", "can", "outperform", "competitive", "baselines", "significantly", "and", "achieves", "greater", "improvements", "on", "the", "bigger", "data", "sets", ".", "besides", ",", "the", "experiments", "also", "prove", "knowledge", "distillation", "the", "best", "way", "to", "transfer", "knowledge", "from", "the", "seer", "decoder", "to", "the", "conventional", "decoder", "compared", "to", "adversarial", "learning", "and", "l2", "regularization", "."]}, {"venue": "ACL", "title": "Cross-Task Generalization via Natural Language 
Crowdsourcing Instructions", "abstract": "Humans (e.g., crowdworkers) have a remarkable ability in solving different tasks, by simply reading textual instructions that define them and looking at a few examples. Despite the success of the conventional supervised learning on individual datasets, such models often struggle with generalization across tasks (e.g., a question-answering system cannot solve classification tasks). A long-standing challenge in AI is to build a model that learns a new task by understanding the human-readable instructions that define it. To study this, we introduce NATURAL INSTRUCTIONS, a dataset of 61 distinct tasks, their human-authored instructions, and 193k task instances (input-output pairs). The instructions are obtained from crowdsourcing instructions used to create existing NLP datasets and mapped to a unified schema. Using this meta-dataset, we measure cross-task generalization by training models on seen tasks and measuring generalization to the remaining unseen ones. We adopt generative pre-trained language models to encode task-specific instructions along with input and generate task output. Our results indicate that models benefit from instructions when evaluated in terms of generalization to unseen tasks (19% better for models utilizing instructions). These models, however, are far behind an estimated performance upperbound indicating significant room for more progress in this direction.", "doc_id": "613f801e3f856951873e79b9fcaf51bc", "publication_year": 2022, "sentences": ["humans ( e . g . , crowdworkers ) have a remarkable ability in solving different tasks , by simply reading textual instructions that define them and looking at a few examples .", "despite the success of the conventional supervised learning on individual datasets , such models often struggle with generalization across tasks ( e . g . 
, a question - answering system cannot solve classification tasks ) .", "a long - standing challenge in ai is to build a model that learns a new task by understanding the human - readable instructions that define it .", "to study this , we introduce natural instructions , a dataset of 61 distinct tasks , their human - authored instructions , and 193k task instances ( input - output pairs ) .", "the instructions are obtained from crowdsourcing instructions used to create existing nlp datasets and mapped to a unified schema .", "using this meta - dataset , we measure cross - task generalization by training models on seen tasks and measuring generalization to the remaining unseen ones .", "we adopt generative pre - trained language models to encode task - specific instructions along with input and generate task output .", "our results indicate that models benefit from instructions when evaluated in terms of generalization to unseen tasks ( 19 % better for models utilizing instructions ) .", "these models , however , are far behind an estimated performance upperbound indicating significant room for more progress in this direction ."], "events": [{"event_type": "RWF", "arguments": [{"text": "conventional supervised learning", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["conventional", "supervised", "learning"], "offsets": [38, 39, 40]}, {"text": "struggle with", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["struggle", "with"], "offsets": [48, 49]}], "trigger": {"text": "struggle with", "tokens": ["struggle", "with"], "offsets": [48, 49]}}, {"event_type": "RWF", "arguments": [{"text": "long - standing challenge", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["long", "-", "standing", "challenge"], "offsets": [71, 72, 73, 74]}, {"text": "model", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["model"], "offsets": [81]}, {"text": "learns", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learns"], 
"offsets": [83]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [79]}}, {"event_type": "PUR", "arguments": [{"text": "new task", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["new", "task"], "offsets": [85, 86]}, {"text": "by understanding the human - readable instructions that define it", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "understanding", "the", "human", "-", "readable", "instructions", "that", "define", "it"], "offsets": [87, 88, 89, 90, 91, 92, 93, 94, 95, 96]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [83]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [102]}, {"text": "natural instructions", "nugget_type": "DST", "argument_type": "Content", "tokens": ["natural", "instructions"], "offsets": [104, 105]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [103]}}, {"event_type": "MDS", "arguments": [{"text": "instructions", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["instructions"], "offsets": [132]}, {"text": "unified schema", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["unified", "schema"], "offsets": [148, 149]}], "trigger": {"text": "mapped", "tokens": ["mapped"], "offsets": [145]}}, {"event_type": "MDS", "arguments": [{"text": "natural instructions", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["natural", "instructions"], "offsets": [104, 105]}, {"text": "measure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["measure"], "offsets": [158]}, {"text": "meta - dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["meta", "-", "dataset"], "offsets": [153, 154, 155]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [151]}}, {"event_type": "PUR", "arguments": [{"text": "cross - task generalization", "nugget_type": "TAK", "argument_type": "Aim", "tokens": 
["cross", "-", "task", "generalization"], "offsets": [159, 160, 161, 162]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [158]}}, {"event_type": "MDS", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["models"], "offsets": [165]}, {"text": "on seen tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "seen", "tasks"], "offsets": [166, 167, 168]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [164]}}, {"event_type": "MDS", "arguments": [{"text": "generalization", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["generalization"], "offsets": [171]}, {"text": "remaining unseen ones", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["remaining", "unseen", "ones"], "offsets": [174, 175, 176]}], "trigger": {"text": "measuring", "tokens": ["measuring"], "offsets": [170]}}, {"event_type": "MDS", "arguments": [{"text": "encode", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encode"], "offsets": [187]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [196]}, {"text": "generative pre - trained language models", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["generative", "pre", "-", "trained", "language", "models"], "offsets": [180, 181, 182, 183, 184, 185]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [179]}}, {"event_type": "PUR", "arguments": [{"text": "task - specific instructions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["task", "-", "specific", "instructions"], "offsets": [188, 189, 190, 191]}, {"text": "input", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["input"], "offsets": [194]}], "trigger": {"text": "encode", "tokens": ["encode"], "offsets": [187]}}, {"event_type": "FIN", "arguments": [{"text": "benefit", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": 
["benefit"], "offsets": [205]}], "trigger": {"text": "indicate", "tokens": ["indicate"], "offsets": [202]}}, {"event_type": "FAC", "arguments": [{"text": "natural instructions", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["natural", "instructions"], "offsets": [104, 105]}, {"text": "models", "nugget_type": "APP", "argument_type": "Object", "tokens": ["models"], "offsets": [204]}, {"text": "when evaluated in terms of generalization to unseen tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "evaluated", "in", "terms", "of", "generalization", "to", "unseen", "tasks"], "offsets": [208, 209, 210, 211, 212, 213, 214, 215, 216]}, {"text": "instructions", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["instructions"], "offsets": [207]}], "trigger": {"text": "benefit", "tokens": ["benefit"], "offsets": [205]}}, {"event_type": "PUR", "arguments": [{"text": "task output", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["task", "output"], "offsets": [197, 198]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [196]}}, {"event_type": "FAC", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["models"], "offsets": [228]}, {"text": "estimated performance upperbound", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["estimated", "performance", "upperbound"], "offsets": [236, 237, 238]}], "trigger": {"text": "far behind", "tokens": ["far", "behind"], "offsets": [233, 234]}}], "document": ["humans", "(", "e", ".", "g", ".", ",", "crowdworkers", ")", "have", "a", "remarkable", "ability", "in", "solving", "different", "tasks", ",", "by", "simply", "reading", "textual", "instructions", "that", "define", "them", "and", "looking", "at", "a", "few", "examples", ".", "despite", "the", "success", "of", "the", "conventional", "supervised", "learning", "on", "individual", "datasets", ",", "such", "models", "often", "struggle", "with", "generalization", 
"across", "tasks", "(", "e", ".", "g", ".", ",", "a", "question", "-", "answering", "system", "cannot", "solve", "classification", "tasks", ")", ".", "a", "long", "-", "standing", "challenge", "in", "ai", "is", "to", "build", "a", "model", "that", "learns", "a", "new", "task", "by", "understanding", "the", "human", "-", "readable", "instructions", "that", "define", "it", ".", "to", "study", "this", ",", "we", "introduce", "natural", "instructions", ",", "a", "dataset", "of", "61", "distinct", "tasks", ",", "their", "human", "-", "authored", "instructions", ",", "and", "193k", "task", "instances", "(", "input", "-", "output", "pairs", ")", ".", "the", "instructions", "are", "obtained", "from", "crowdsourcing", "instructions", "used", "to", "create", "existing", "nlp", "datasets", "and", "mapped", "to", "a", "unified", "schema", ".", "using", "this", "meta", "-", "dataset", ",", "we", "measure", "cross", "-", "task", "generalization", "by", "training", "models", "on", "seen", "tasks", "and", "measuring", "generalization", "to", "the", "remaining", "unseen", "ones", ".", "we", "adopt", "generative", "pre", "-", "trained", "language", "models", "to", "encode", "task", "-", "specific", "instructions", "along", "with", "input", "and", "generate", "task", "output", ".", "our", "results", "indicate", "that", "models", "benefit", "from", "instructions", "when", "evaluated", "in", "terms", "of", "generalization", "to", "unseen", "tasks", "(", "19", "%", "better", "for", "models", "utilizing", "instructions", ")", ".", "these", "models", ",", "however", ",", "are", "far", "behind", "an", "estimated", "performance", "upperbound", "indicating", "significant", "room", "for", "more", "progress", "in", "this", "direction", "."]}, {"venue": "ACL", "title": "From Discourse to Narrative: Knowledge Projection for Event Relation Extraction", "abstract": "Current event-centric knowledge graphs highly rely on explicit connectives to mine relations between events. 
Unfortunately, due to the sparsity of connectives, these methods severely undermine the coverage of EventKGs. The lack of high-quality labelled corpora further exacerbates that problem. In this paper, we propose a knowledge projection paradigm for event relation extraction: projecting discourse knowledge to narratives by exploiting the commonalities between them. Specifically, we propose Multi-tier Knowledge Projection Network (MKPNet), which can leverage multi-tier discourse knowledge effectively for event relation extraction. In this way, the labelled data requirement is significantly reduced, and implicit event relations can be effectively extracted. Intrinsic experimental results show that MKPNet achieves the new state-of-the-art performance and extrinsic experimental results verify the value of the extracted event relations.", "doc_id": "748712057e80f7ff3a7f3cd33b3373d8", "publication_year": 2021, "sentences": ["current event - centric knowledge graphs highly rely on explicit connectives to mine relations between events .", "unfortunately , due to the sparsity of connectives , these methods severely undermine the coverage of eventkgs .", "the lack of high - quality labelled corpora further exacerbates that problem .", "in this paper , we propose a knowledge projection paradigm for event relation extraction : projecting discourse knowledge to narratives by exploiting the commonalities between them .", "specifically , we propose multi - tier knowledge projection network ( mkpnet ) , which can leverage multi - tier discourse knowledge effectively for event relation extraction .", "in this way , the labelled data requirement is significantly reduced , and implicit event relations can be effectively extracted .", "intrinsic experimental results show that mkpnet achieves the new state - of - the - art performance and extrinsic experimental results verify the value of the extracted event relations ."], "events": [{"event_type": "ITT", "arguments": [{"text": "event - 
centric knowledge graphs", "nugget_type": "APP", "argument_type": "Target", "tokens": ["event", "-", "centric", "knowledge", "graphs"], "offsets": [1, 2, 3, 4, 5]}], "trigger": {"text": "rely", "tokens": ["rely"], "offsets": [7]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [52]}, {"text": "knowledge projection paradigm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["knowledge", "projection", "paradigm"], "offsets": [55, 56, 57]}, {"text": "event relation extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["event", "relation", "extraction"], "offsets": [59, 60, 61]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [53]}}, {"event_type": "MDS", "arguments": [{"text": "commonalities", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["commonalities"], "offsets": [71]}, {"text": "discourse knowledge", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["discourse", "knowledge"], "offsets": [64, 65]}], "trigger": {"text": "projecting", "tokens": ["projecting"], "offsets": [63]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [77]}, {"text": "multi - tier knowledge projection network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "tier", "knowledge", "projection", "network"], "offsets": [79, 80, 81, 82, 83, 84]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [78]}}, {"event_type": "FAC", "arguments": [{"text": "labelled data requirement", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["labelled", "data", "requirement"], "offsets": [108, 109, 110]}], "trigger": {"text": "reduced", "tokens": ["reduced"], "offsets": [113]}}, {"event_type": "FAC", "arguments": [{"text": "implicit event relations", "nugget_type": "FEA", "argument_type": "Object", "tokens": 
["implicit", "event", "relations"], "offsets": [116, 117, 118]}, {"text": "effectively", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["effectively"], "offsets": [121]}], "trigger": {"text": "extracted", "tokens": ["extracted"], "offsets": [122]}}, {"event_type": "FAC", "arguments": [{"text": "multi - tier knowledge projection network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multi", "-", "tier", "knowledge", "projection", "network"], "offsets": [79, 80, 81, 82, 83, 84]}, {"text": "state - of - the - art performance", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [133, 134, 135, 136, 137, 138, 139, 140]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [130]}}, {"event_type": "FAC", "arguments": [{"text": "value of the extracted event relations", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["value", "of", "the", "extracted", "event", "relations"], "offsets": [147, 148, 149, 150, 151, 152]}], "trigger": {"text": "verify", "tokens": ["verify"], "offsets": [145]}}], "document": ["current", "event", "-", "centric", "knowledge", "graphs", "highly", "rely", "on", "explicit", "connectives", "to", "mine", "relations", "between", "events", ".", "unfortunately", ",", "due", "to", "the", "sparsity", "of", "connectives", ",", "these", "methods", "severely", "undermine", "the", "coverage", "of", "eventkgs", ".", "the", "lack", "of", "high", "-", "quality", "labelled", "corpora", "further", "exacerbates", "that", "problem", ".", "in", "this", "paper", ",", "we", "propose", "a", "knowledge", "projection", "paradigm", "for", "event", "relation", "extraction", ":", "projecting", "discourse", "knowledge", "to", "narratives", "by", "exploiting", "the", "commonalities", "between", "them", ".", "specifically", ",", "we", "propose", "multi", "-", "tier", "knowledge", "projection", "network", "(", "mkpnet", ")", ",", "which", "can", 
"leverage", "multi", "-", "tier", "discourse", "knowledge", "effectively", "for", "event", "relation", "extraction", ".", "in", "this", "way", ",", "the", "labelled", "data", "requirement", "is", "significantly", "reduced", ",", "and", "implicit", "event", "relations", "can", "be", "effectively", "extracted", ".", "intrinsic", "experimental", "results", "show", "that", "mkpnet", "achieves", "the", "new", "state", "-", "of", "-", "the", "-", "art", "performance", "and", "extrinsic", "experimental", "results", "verify", "the", "value", "of", "the", "extracted", "event", "relations", "."]}, {"venue": "ACL", "title": "AdapLeR: Speeding up Inference by Adaptive Length Reduction", "abstract": "Pre-trained language models have shown stellar performance in various downstream tasks. But, this usually comes at the cost of high latency and computation, hindering their usage in resource-limited settings. In this work, we propose a novel approach for reducing the computational cost of BERT with minimal loss in downstream performance. Our method dynamically eliminates less contributing tokens through layers, resulting in shorter lengths and consequently lower computational cost. To determine the importance of each token representation, we train a Contribution Predictor for each layer using a gradient-based saliency method. Our experiments on several diverse classification tasks show speedups up to 22x during inference time without much sacrifice in performance. We also validate the quality of the selected tokens in our method using human annotations in the ERASER benchmark. In comparison to other widely used strategies for selecting important tokens, such as saliency and attention, our proposed method has a significantly lower false positive rate in generating rationales. 
Our code is freely available at https://github.com/amodaresi/AdapLeR.", "doc_id": "6fed326dcd22aa6648ba27c972819013", "publication_year": 2022, "sentences": ["pre - trained language models have shown stellar performance in various downstream tasks .", "but , this usually comes at the cost of high latency and computation , hindering their usage in resource - limited settings .", "in this work , we propose a novel approach for reducing the computational cost of bert with minimal loss in downstream performance .", "our method dynamically eliminates less contributing tokens through layers , resulting in shorter lengths and consequently lower computational cost .", "to determine the importance of each token representation , we train a contribution predictor for each layer using a gradient - based saliency method .", "our experiments on several diverse classification tasks show speedups up to 22x during inference time without much sacrifice in performance .", "we also validate the quality of the selected tokens in our method using human annotations in the eraser benchmark .", "in comparison to other widely used strategies for selecting important tokens , such as saliency and attention , our proposed method has a significantly lower false positive rate in generating rationales .", "our code is freely available at https : / / github . 
com / amodaresi / adapler ."], "events": [{"event_type": "ITT", "arguments": [{"text": "downstream tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["downstream", "tasks"], "offsets": [11, 12]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "hindering", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["hindering"], "offsets": [28]}, {"text": "cost of high latency", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cost", "of", "high", "latency"], "offsets": [21, 22, 23, 24]}, {"text": "cost of high computation", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cost", "of", "high", "computation"], "offsets": [21, 22, 23, 26]}], "trigger": {"text": "comes", "tokens": ["comes"], "offsets": [18]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [41]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [45]}, {"text": "reducing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reducing"], "offsets": [47]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [42]}}, {"event_type": "PUR", "arguments": [{"text": "computational cost of bert", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["computational", "cost", "of", "bert"], "offsets": [49, 50, 51, 52]}, {"text": "with minimal loss in downstream performance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "minimal", "loss", "in", "downstream", "performance"], "offsets": [53, 54, 55, 56, 57, 58]}], "trigger": {"text": "reducing", "tokens": ["reducing"], "offsets": [47]}}, {"event_type": "MDS", "arguments": [{"text": "less contributing tokens", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["less", "contributing", "tokens"], "offsets": [64, 65, 66]}, {"text": "resulting", "nugget_type": 
"E-PUR", "argument_type": "Target", "tokens": ["resulting"], "offsets": [70]}, {"text": "layers", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["layers"], "offsets": [68]}], "trigger": {"text": "dynamically eliminates", "tokens": ["dynamically", "eliminates"], "offsets": [62, 63]}}, {"event_type": "PUR", "arguments": [{"text": "shorter lengths", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["shorter", "lengths"], "offsets": [72, 73]}, {"text": "consequently lower computational cost", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["consequently", "lower", "computational", "cost"], "offsets": [75, 76, 77, 78]}], "trigger": {"text": "resulting", "tokens": ["resulting"], "offsets": [70]}}, {"event_type": "FIN", "arguments": [{"text": "up", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["up"], "offsets": [114]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [112]}}, {"event_type": "FAC", "arguments": [{"text": "during inference time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "inference", "time"], "offsets": [117, 118, 119]}, {"text": "without much sacrifice in performance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "much", "sacrifice", "in", "performance"], "offsets": [120, 121, 122, 123, 124]}, {"text": "on several diverse classification tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "several", "diverse", "classification", "tasks"], "offsets": [107, 108, 109, 110, 111]}, {"text": "22x", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["22x"], "offsets": [116]}, {"text": "speedups", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["speedups"], "offsets": [113]}], "trigger": {"text": "up", "tokens": ["up"], "offsets": [114]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [126]}, {"text": "validate", 
"nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["validate"], "offsets": [128]}, {"text": "human annotations in the eraser benchmark", "nugget_type": "APP", "argument_type": "Content", "tokens": ["human", "annotations", "in", "the", "eraser", "benchmark"], "offsets": [139, 140, 141, 142, 143, 144]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [138]}}, {"event_type": "PUR", "arguments": [{"text": "quality of the selected tokens", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["quality", "of", "the", "selected", "tokens"], "offsets": [130, 131, 132, 133, 134]}], "trigger": {"text": "validate", "tokens": ["validate"], "offsets": [128]}}, {"event_type": "CMP", "arguments": [{"text": "other widely used strategies for selecting important tokens", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["other", "widely", "used", "strategies", "for", "selecting", "important", "tokens"], "offsets": [149, 150, 151, 152, 153, 154, 155, 156]}, {"text": "significantly lower false positive rate", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "lower", "false", "positive", "rate"], "offsets": [169, 170, 171, 172, 173]}, {"text": "in generating rationales", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "generating", "rationales"], "offsets": [174, 175, 176]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [167]}}, {"event_type": "PUR", "arguments": [{"text": "usage", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["usage"], "offsets": [30]}, {"text": "in resource - limited settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "resource", "-", "limited", "settings"], "offsets": [31, 32, 33, 34, 35]}], "trigger": {"text": "hindering", "tokens": ["hindering"], "offsets": [28]}}, {"event_type": "MDS", "arguments": [{"text": "gradient - based saliency method", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["gradient", "-", 
"based", "saliency", "method"], "offsets": [99, 100, 101, 102, 103]}, {"text": "contribution predictor", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["contribution", "predictor"], "offsets": [92, 93]}, {"text": "determine", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["determine"], "offsets": [81]}, {"text": "each layer", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["each", "layer"], "offsets": [95, 96]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [90]}}, {"event_type": "PUR", "arguments": [{"text": "importance of each token representation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["importance", "of", "each", "token", "representation"], "offsets": [83, 84, 85, 86, 87]}], "trigger": {"text": "determine", "tokens": ["determine"], "offsets": [81]}}], "document": ["pre", "-", "trained", "language", "models", "have", "shown", "stellar", "performance", "in", "various", "downstream", "tasks", ".", "but", ",", "this", "usually", "comes", "at", "the", "cost", "of", "high", "latency", "and", "computation", ",", "hindering", "their", "usage", "in", "resource", "-", "limited", "settings", ".", "in", "this", "work", ",", "we", "propose", "a", "novel", "approach", "for", "reducing", "the", "computational", "cost", "of", "bert", "with", "minimal", "loss", "in", "downstream", "performance", ".", "our", "method", "dynamically", "eliminates", "less", "contributing", "tokens", "through", "layers", ",", "resulting", "in", "shorter", "lengths", "and", "consequently", "lower", "computational", "cost", ".", "to", "determine", "the", "importance", "of", "each", "token", "representation", ",", "we", "train", "a", "contribution", "predictor", "for", "each", "layer", "using", "a", "gradient", "-", "based", "saliency", "method", ".", "our", "experiments", "on", "several", "diverse", "classification", "tasks", "show", "speedups", "up", "to", "22x", "during", "inference", "time", "without", "much", 
"sacrifice", "in", "performance", ".", "we", "also", "validate", "the", "quality", "of", "the", "selected", "tokens", "in", "our", "method", "using", "human", "annotations", "in", "the", "eraser", "benchmark", ".", "in", "comparison", "to", "other", "widely", "used", "strategies", "for", "selecting", "important", "tokens", ",", "such", "as", "saliency", "and", "attention", ",", "our", "proposed", "method", "has", "a", "significantly", "lower", "false", "positive", "rate", "in", "generating", "rationales", ".", "our", "code", "is", "freely", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "amodaresi", "/", "adapler", "."]}, {"venue": "ACL", "title": "OTTers: One-turn Topic Transitions for Open-Domain Dialogue", "abstract": "Mixed initiative in open-domain dialogue requires a system to pro-actively introduce new topics. The one-turn topic transition task explores how a system connects two topics in a cooperative and coherent manner. The goal of the task is to generate a \u201cbridging\u201d utterance connecting the new topic to the topic of the previous conversation turn. We are especially interested in commonsense explanations of how a new topic relates to what has been mentioned before. We first collect a new dataset of human one-turn topic transitions, which we callOTTers. We then explore different strategies used by humans when asked to complete such a task, and notice that the use of a bridging utterance to connect the two topics is the approach used the most. 
We finally show how existing state-of-the-art text generation models can be adapted to this task and examine the performance of these baselines on different splits of the OTTers data.", "doc_id": "ab695fb436d876620cd06dad70520d06", "publication_year": 2021, "sentences": ["mixed initiative in open - domain dialogue requires a system to pro - actively introduce new topics .", "the one - turn topic transition task explores how a system connects two topics in a cooperative and coherent manner .", "the goal of the task is to generate a \u201c bridging \u201d utterance connecting the new topic to the topic of the previous conversation turn .", "we are especially interested in commonsense explanations of how a new topic relates to what has been mentioned before .", "we first collect a new dataset of human one - turn topic transitions , which we callotters .", "we then explore different strategies used by humans when asked to complete such a task , and notice that the use of a bridging utterance to connect the two topics is the approach used the most .", "we finally show how existing state - of - the - art text generation models can be adapted to this task and examine the performance of these baselines on different splits of the otters data ."], "events": [{"event_type": "ITT", "arguments": [{"text": "one - turn topic transition task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["one", "-", "turn", "topic", "transition", "task"], "offsets": [19, 20, 21, 22, 23, 24]}], "trigger": {"text": "explores", "tokens": ["explores"], "offsets": [25]}}, {"event_type": "RWS", "arguments": [{"text": "\u201c bridging \u201d utterance", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["\u201c", "bridging", "\u201d", "utterance"], "offsets": [48, 49, 50, 51]}, {"text": "connecting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["connecting"], "offsets": [52]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [46]}}, 
{"event_type": "PUR", "arguments": [{"text": "new topic", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["new", "topic"], "offsets": [54, 55]}, {"text": "to the topic of the previous conversation turn", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "the", "topic", "of", "the", "previous", "conversation", "turn"], "offsets": [56, 57, 58, 59, 60, 61, 62, 63]}], "trigger": {"text": "connecting", "tokens": ["connecting"], "offsets": [52]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [85]}, {"text": "dataset of human one - turn topic transitions", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset", "of", "human", "one", "-", "turn", "topic", "transitions"], "offsets": [90, 91, 92, 93, 94, 95, 96, 97]}], "trigger": {"text": "collect", "tokens": ["collect"], "offsets": [87]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [103]}, {"text": "different strategies", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["different", "strategies"], "offsets": [106, 107]}, {"text": "used by humans when asked to complete such a task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["used", "by", "humans", "when", "asked", "to", "complete", "such", "a", "task"], "offsets": [108, 109, 110, 111, 112, 113, 114, 115, 116, 117]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [105]}}, {"event_type": "FIN", "arguments": [{"text": "approach", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["approach"], "offsets": [135]}], "trigger": {"text": "notice", "tokens": ["notice"], "offsets": [120]}}, {"event_type": "FAC", "arguments": [{"text": "use of a bridging utterance", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["use", "of", "a", "bridging", "utterance"], "offsets": [123, 124, 125, 126, 127]}, 
{"text": "to connect the two topics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "connect", "the", "two", "topics"], "offsets": [128, 129, 130, 131, 132]}, {"text": "used the most", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["used", "the", "most"], "offsets": [136, 137, 138]}], "trigger": {"text": "approach", "tokens": ["approach"], "offsets": [135]}}, {"event_type": "FAC", "arguments": [{"text": "existing state - of - the - art text generation models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "state", "-", "of", "-", "the", "-", "art", "text", "generation", "models"], "offsets": [144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154]}, {"text": "this task", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["one", "-", "turn", "topic", "transition", "task"], "offsets": [19, 20, 21, 22, 23, 24]}], "trigger": {"text": "adapted", "tokens": ["adapted"], "offsets": [157]}}, {"event_type": "FAC", "arguments": [{"text": "performance of these baselines", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "of", "these", "baselines"], "offsets": [164, 165, 166, 167]}, {"text": "different splits of the otters data", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["different", "splits", "of", "the", "otters", "data"], "offsets": [169, 170, 171, 172, 173, 174]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": [162]}}], "document": ["mixed", "initiative", "in", "open", "-", "domain", "dialogue", "requires", "a", "system", "to", "pro", "-", "actively", "introduce", "new", "topics", ".", "the", "one", "-", "turn", "topic", "transition", "task", "explores", "how", "a", "system", "connects", "two", "topics", "in", "a", "cooperative", "and", "coherent", "manner", ".", "the", "goal", "of", "the", "task", "is", "to", "generate", "a", "\u201c", "bridging", "\u201d", "utterance", "connecting", "the", "new", "topic", "to", "the", "topic", "of", 
"the", "previous", "conversation", "turn", ".", "we", "are", "especially", "interested", "in", "commonsense", "explanations", "of", "how", "a", "new", "topic", "relates", "to", "what", "has", "been", "mentioned", "before", ".", "we", "first", "collect", "a", "new", "dataset", "of", "human", "one", "-", "turn", "topic", "transitions", ",", "which", "we", "callotters", ".", "we", "then", "explore", "different", "strategies", "used", "by", "humans", "when", "asked", "to", "complete", "such", "a", "task", ",", "and", "notice", "that", "the", "use", "of", "a", "bridging", "utterance", "to", "connect", "the", "two", "topics", "is", "the", "approach", "used", "the", "most", ".", "we", "finally", "show", "how", "existing", "state", "-", "of", "-", "the", "-", "art", "text", "generation", "models", "can", "be", "adapted", "to", "this", "task", "and", "examine", "the", "performance", "of", "these", "baselines", "on", "different", "splits", "of", "the", "otters", "data", "."]}, {"venue": "ACL", "title": "Dual Slot Selector via Local Reliability Verification for Dialogue State Tracking", "abstract": "The goal of dialogue state tracking (DST) is to predict the current dialogue state given all previous dialogue contexts. Existing approaches generally predict the dialogue state at every turn from scratch. However, the overwhelming majority of the slots in each turn should simply inherit the slot values from the previous turn. Therefore, the mechanism of treating slots equally in each turn not only is inefficient but also may lead to additional errors because of the redundant slot value generation. To address this problem, we devise the two-stage DSS-DST which consists of the Dual Slot Selector based on the current turn dialogue, and the Slot Value Generator based on the dialogue history. 
The Dual Slot Selector determines each slot whether to update slot value or to inherit the slot value from the previous turn from two aspects: (1) if there is a strong relationship between it and the current turn dialogue utterances; (2) if a slot value with high reliability can be obtained for it through the current turn dialogue. The slots selected to be updated are permitted to enter the Slot Value Generator to update values by a hybrid method, while the other slots directly inherit the values from the previous turn. Empirical results show that our method achieves 56.93%, 60.73%, and 58.04% joint accuracy on MultiWOZ 2.0, MultiWOZ 2.1, and MultiWOZ 2.2 datasets respectively and achieves a new state-of-the-art performance with significant improvements.", "doc_id": "3d38f8ea0e9a267292caf03f8e77388d", "publication_year": 2021, "sentences": ["the goal of dialogue state tracking ( dst ) is to predict the current dialogue state given all previous dialogue contexts .", "existing approaches generally predict the dialogue state at every turn from scratch .", "however , the overwhelming majority of the slots in each turn should simply inherit the slot values from the previous turn .", "therefore , the mechanism of treating slots equally in each turn not only is inefficient but also may lead to additional errors because of the redundant slot value generation .", "to address this problem , we devise the two - stage dss - dst which consists of the dual slot selector based on the current turn dialogue , and the slot value generator based on the dialogue history .", "the dual slot selector determines each slot whether to update slot value or to inherit the slot value from the previous turn from two aspects : ( 1 ) if there is a strong relationship between it and the current turn dialogue utterances ; ( 2 ) if a slot value with high reliability can be obtained for it through the current turn dialogue .", "the slots selected to be updated are permitted to enter the slot value 
generator to update values by a hybrid method , while the other slots directly inherit the values from the previous turn .", "empirical results show that our method achieves 56 . 93 % , 60 . 73 % , and 58 . 04 % joint accuracy on multiwoz 2 . 0 , multiwoz 2 . 1 , and multiwoz 2 . 2 datasets respectively and achieves a new state - of - the - art performance with significant improvements ."], "events": [{"event_type": "ITT", "arguments": [{"text": "dialogue state tracking", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dialogue", "state", "tracking"], "offsets": [3, 4, 5]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [11]}}, {"event_type": "RWS", "arguments": [{"text": "dialogue state", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["dialogue", "state"], "offsets": [27, 28]}, {"text": "existing approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "approaches"], "offsets": [22, 23]}, {"text": "at every turn from scratch", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "every", "turn", "from", "scratch"], "offsets": [29, 30, 31, 32, 33]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [25]}}, {"event_type": "RWF", "arguments": [{"text": "mechanism of treating slots", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["mechanism", "of", "treating", "slots"], "offsets": [60, 61, 62, 63]}, {"text": "inefficient", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["inefficient"], "offsets": [71]}, {"text": "additional errors", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["additional", "errors"], "offsets": [77, 78]}, {"text": "redundant slot value generation", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["redundant", "slot", "value", "generation"], "offsets": [82, 83, 84, 85]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [75]}}, {"event_type": "PRP", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [92]}, {"text": "two - stage dss - dst", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "-", "stage", "dss", "-", "dst"], "offsets": [95, 96, 97, 98, 99, 100]}], "trigger": {"text": "devise", "tokens": ["devise"], "offsets": [93]}}, {"event_type": "MDS", "arguments": [{"text": "strong relationship between it and the current turn dialogue utterances", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["strong", "relationship", "between", "it", "and", "the", "current", "turn", "dialogue", "utterances"], "offsets": [159, 160, 161, 162, 163, 164, 165, 166, 167, 168]}, {"text": "slot value with high reliability can be obtained for it through the current turn dialogue", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["slot", "value", "with", "high", "reliability", "can", "be", "obtained", "for", "it", "through", "the", "current", "turn", "dialogue"], "offsets": [175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189]}, {"text": "slot value", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["slot", "value"], "offsets": [136, 137]}], "trigger": {"text": "update", "tokens": ["update"], "offsets": [135]}}, {"event_type": "MDS", "arguments": [{"text": "update", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["update"], "offsets": [206]}, {"text": "slots selected to be updated", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["slots", "selected", "to", "be", "updated"], "offsets": [192, 193, 194, 195, 196]}, {"text": "hybrid method", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["hybrid", "method"], "offsets": [210, 211]}, {"text": "slot value generator", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["slot", "value", "generator"], "offsets": [202, 203, 204]}], "trigger": {"text": "enter", "tokens": ["enter"], "offsets": [200]}}, 
{"event_type": "PUR", "arguments": [{"text": "values", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["values"], "offsets": [207]}], "trigger": {"text": "update", "tokens": ["update"], "offsets": [206]}}, {"event_type": "MDS", "arguments": [{"text": "other slots", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["other", "slots"], "offsets": [215, 216]}, {"text": "values from the previous turn", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["values", "from", "the", "previous", "turn"], "offsets": [220, 221, 222, 223, 224]}], "trigger": {"text": "inherit", "tokens": ["inherit"], "offsets": [218]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [232]}, {"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [269]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [228]}}, {"event_type": "FAC", "arguments": [{"text": "two - stage dss - dst", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["two", "-", "stage", "dss", "-", "dst"], "offsets": [95, 96, 97, 98, 99, 100]}, {"text": "56 . 93 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["56", ".", "93", "%"], "offsets": [233, 234, 235, 236]}, {"text": "multiwoz 2 . 0", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multiwoz", "2", ".", "0"], "offsets": [251, 252, 253, 254]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["accuracy"], "offsets": [249]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [232]}}, {"event_type": "FAC", "arguments": [{"text": "two - stage dss - dst", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["two", "-", "stage", "dss", "-", "dst"], "offsets": [95, 96, 97, 98, 99, 100]}, {"text": "60 . 
73 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["60", ".", "73", "%"], "offsets": [238, 239, 240, 241]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["accuracy"], "offsets": [249]}, {"text": "multiwoz 2 . 1", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multiwoz", "2", ".", "1"], "offsets": [256, 257, 258, 259]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [232]}}, {"event_type": "FAC", "arguments": [{"text": "two - stage dss - dst", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["two", "-", "stage", "dss", "-", "dst"], "offsets": [95, 96, 97, 98, 99, 100]}, {"text": "58 . 04 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["58", ".", "04", "%"], "offsets": [244, 245, 246, 247]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["accuracy"], "offsets": [249]}, {"text": "multiwoz 2 . 2 datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multiwoz", "2", ".", "2", "datasets"], "offsets": [262, 263, 264, 265, 266]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [232]}}, {"event_type": "CMP", "arguments": [{"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [272, 273, 274, 275, 276, 277, 278, 279]}, {"text": "improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvements"], "offsets": [282]}, {"text": "significant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant"], "offsets": [281]}, {"text": "two - stage dss - dst", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["two", "-", "stage", "dss", "-", "dst"], "offsets": [95, 96, 97, 98, 99, 100]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [269]}}], "document": ["the", "goal", "of", "dialogue", "state", "tracking", "(", 
"dst", ")", "is", "to", "predict", "the", "current", "dialogue", "state", "given", "all", "previous", "dialogue", "contexts", ".", "existing", "approaches", "generally", "predict", "the", "dialogue", "state", "at", "every", "turn", "from", "scratch", ".", "however", ",", "the", "overwhelming", "majority", "of", "the", "slots", "in", "each", "turn", "should", "simply", "inherit", "the", "slot", "values", "from", "the", "previous", "turn", ".", "therefore", ",", "the", "mechanism", "of", "treating", "slots", "equally", "in", "each", "turn", "not", "only", "is", "inefficient", "but", "also", "may", "lead", "to", "additional", "errors", "because", "of", "the", "redundant", "slot", "value", "generation", ".", "to", "address", "this", "problem", ",", "we", "devise", "the", "two", "-", "stage", "dss", "-", "dst", "which", "consists", "of", "the", "dual", "slot", "selector", "based", "on", "the", "current", "turn", "dialogue", ",", "and", "the", "slot", "value", "generator", "based", "on", "the", "dialogue", "history", ".", "the", "dual", "slot", "selector", "determines", "each", "slot", "whether", "to", "update", "slot", "value", "or", "to", "inherit", "the", "slot", "value", "from", "the", "previous", "turn", "from", "two", "aspects", ":", "(", "1", ")", "if", "there", "is", "a", "strong", "relationship", "between", "it", "and", "the", "current", "turn", "dialogue", "utterances", ";", "(", "2", ")", "if", "a", "slot", "value", "with", "high", "reliability", "can", "be", "obtained", "for", "it", "through", "the", "current", "turn", "dialogue", ".", "the", "slots", "selected", "to", "be", "updated", "are", "permitted", "to", "enter", "the", "slot", "value", "generator", "to", "update", "values", "by", "a", "hybrid", "method", ",", "while", "the", "other", "slots", "directly", "inherit", "the", "values", "from", "the", "previous", "turn", ".", "empirical", "results", "show", "that", "our", "method", "achieves", "56", ".", "93", "%", ",", "60", ".", "73", "%", ",", "and", 
"58", ".", "04", "%", "joint", "accuracy", "on", "multiwoz", "2", ".", "0", ",", "multiwoz", "2", ".", "1", ",", "and", "multiwoz", "2", ".", "2", "datasets", "respectively", "and", "achieves", "a", "new", "state", "-", "of", "-", "the", "-", "art", "performance", "with", "significant", "improvements", "."]}, {"venue": "ACL", "title": "Contextual Neural Machine Translation Improves Translation of Cataphoric Pronouns", "abstract": "The advent of context-aware NMT has resulted in promising improvements in the overall translation quality and specifically in the translation of discourse phenomena such as pronouns. Previous works have mainly focused on the use of past sentences as context with a focus on anaphora translation. In this work, we investigate the effect of future sentences as context by comparing the performance of a contextual NMT model trained with the future context to the one trained with the past context. Our experiments and evaluation, using generic and pronoun-focused automatic metrics, show that the use of future context not only achieves significant improvements over the context-agnostic Transformer, but also demonstrates comparable and in some cases improved performance over its counterpart trained on past context. 
We also perform an evaluation on a targeted cataphora test suite and report significant gains over the context-agnostic Transformer in terms of BLEU.", "doc_id": "5fa7959d3da25362090b2fe38f6d0d10", "publication_year": 2020, "sentences": ["the advent of context - aware nmt has resulted in promising improvements in the overall translation quality and specifically in the translation of discourse phenomena such as pronouns .", "previous works have mainly focused on the use of past sentences as context with a focus on anaphora translation .", "in this work , we investigate the effect of future sentences as context by comparing the performance of a contextual nmt model trained with the future context to the one trained with the past context .", "our experiments and evaluation , using generic and pronoun - focused automatic metrics , show that the use of future context not only achieves significant improvements over the context - agnostic transformer , but also demonstrates comparable and in some cases improved performance over its counterpart trained on past context .", "we also perform an evaluation on a targeted cataphora test suite and report significant gains over the context - agnostic transformer in terms of bleu ."], "events": [{"event_type": "ITT", "arguments": [{"text": "context - aware nmt", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["context", "-", "aware", "nmt"], "offsets": [3, 4, 5, 6]}], "trigger": {"text": "resulted", "tokens": ["resulted"], "offsets": [8]}}, {"event_type": "RWS", "arguments": [{"text": "previous works", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "works"], "offsets": [29, 30]}, {"text": "anaphora translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["anaphora", "translation"], "offsets": [46, 47]}, {"text": "past sentences", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["past", "sentences"], "offsets": [38, 39]}, {"text": "context", "nugget_type": 
"FEA", "argument_type": "BaseComponent", "tokens": ["context"], "offsets": [41]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [36]}}, {"event_type": "MDS", "arguments": [{"text": "contextual nmt model trained with the future context", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["contextual", "nmt", "model", "trained", "with", "the", "future", "context"], "offsets": [68, 69, 70, 71, 72, 73, 74, 75]}, {"text": "one trained with the past context", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["one", "trained", "with", "the", "past", "context"], "offsets": [78, 79, 80, 81, 82, 83]}, {"text": "investigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["investigate"], "offsets": [54]}], "trigger": {"text": "comparing", "tokens": ["comparing"], "offsets": [63]}}, {"event_type": "PUR", "arguments": [{"text": "effect of future sentences as context", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["effect", "of", "future", "sentences", "as", "context"], "offsets": [56, 57, 58, 59, 60, 61]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [54]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [108]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [99]}}, {"event_type": "FAC", "arguments": [{"text": "use of future context", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["use", "of", "future", "context"], "offsets": [102, 103, 104, 105]}, {"text": "significant improvements", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["significant", "improvements"], "offsets": [109, 110]}, {"text": "over the context - agnostic transformer", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "the", "context", "-", "agnostic", "transformer"], "offsets": [111, 112, 113, 114, 115, 116]}], "trigger": {"text": "achieves", "tokens": 
["achieves"], "offsets": [108]}}, {"event_type": "FAC", "arguments": [{"text": "comparable", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["comparable"], "offsets": [121]}], "trigger": {"text": "demonstrates", "tokens": ["demonstrates"], "offsets": [120]}}, {"event_type": "CMP", "arguments": [{"text": "its counterpart trained on past context", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["its", "counterpart", "trained", "on", "past", "context"], "offsets": [129, 130, 131, 132, 133, 134]}, {"text": "improved", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improved"], "offsets": [126]}], "trigger": {"text": "improved", "tokens": ["improved"], "offsets": [126]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [136]}, {"text": "evaluation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["evaluation"], "offsets": [140]}, {"text": "on a targeted cataphora test suite", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "targeted", "cataphora", "test", "suite"], "offsets": [141, 142, 143, 144, 145, 146]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [138]}}, {"event_type": "CMP", "arguments": [{"text": "context - agnostic transformer", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["context", "-", "agnostic", "transformer"], "offsets": [153, 154, 155, 156]}, {"text": "significant gains", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significant", "gains"], "offsets": [149, 150]}, {"text": "bleu", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bleu"], "offsets": [160]}], "trigger": {"text": "report", "tokens": ["report"], "offsets": [148]}}], "document": ["the", "advent", "of", "context", "-", "aware", "nmt", "has", "resulted", "in", "promising", "improvements", "in", "the", "overall", "translation", "quality", "and", "specifically", "in", "the", 
"translation", "of", "discourse", "phenomena", "such", "as", "pronouns", ".", "previous", "works", "have", "mainly", "focused", "on", "the", "use", "of", "past", "sentences", "as", "context", "with", "a", "focus", "on", "anaphora", "translation", ".", "in", "this", "work", ",", "we", "investigate", "the", "effect", "of", "future", "sentences", "as", "context", "by", "comparing", "the", "performance", "of", "a", "contextual", "nmt", "model", "trained", "with", "the", "future", "context", "to", "the", "one", "trained", "with", "the", "past", "context", ".", "our", "experiments", "and", "evaluation", ",", "using", "generic", "and", "pronoun", "-", "focused", "automatic", "metrics", ",", "show", "that", "the", "use", "of", "future", "context", "not", "only", "achieves", "significant", "improvements", "over", "the", "context", "-", "agnostic", "transformer", ",", "but", "also", "demonstrates", "comparable", "and", "in", "some", "cases", "improved", "performance", "over", "its", "counterpart", "trained", "on", "past", "context", ".", "we", "also", "perform", "an", "evaluation", "on", "a", "targeted", "cataphora", "test", "suite", "and", "report", "significant", "gains", "over", "the", "context", "-", "agnostic", "transformer", "in", "terms", "of", "bleu", "."]}, {"venue": "ACL", "title": "FormNet: Structural Encoding beyond Sequential Modeling in Form Document Information Extraction", "abstract": "Sequence modeling has demonstrated state-of-the-art performance on natural language and document understanding tasks. However, it is challenging to correctly serialize tokens in form-like documents in practice due to their variety of layout patterns. We propose FormNet, a structure-aware sequence model to mitigate the suboptimal serialization of forms. First, we design Rich Attention that leverages the spatial relationship between tokens in a form for more precise attention score calculation. 
Second, we construct Super-Tokens for each word by embedding representations from their neighboring tokens through graph convolutions. FormNet therefore explicitly recovers local syntactic information that may have been lost during serialization. In experiments, FormNet outperforms existing methods with a more compact model size and less pre-training data, establishing new state-of-the-art performance on CORD, FUNSD and Payment benchmarks.", "doc_id": "434fcb87b31863401e0b5f6093ba995a", "publication_year": 2022, "sentences": ["sequence modeling has demonstrated state - of - the - art performance on natural language and document understanding tasks .", "however , it is challenging to correctly serialize tokens in form - like documents in practice due to their variety of layout patterns .", "we propose formnet , a structure - aware sequence model to mitigate the suboptimal serialization of forms .", "first , we design rich attention that leverages the spatial relationship between tokens in a form for more precise attention score calculation .", "second , we construct super - tokens for each word by embedding representations from their neighboring tokens through graph convolutions .", "formnet therefore explicitly recovers local syntactic information that may have been lost during serialization .", "in experiments , formnet outperforms existing methods with a more compact model size and less pre - training data , establishing new state - of - the - art performance on cord , funsd and payment benchmarks ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language"], "offsets": [13, 14]}, {"text": "document understanding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["document", "understanding"], "offsets": [16, 17]}], "trigger": {"text": "demonstrated", "tokens": ["demonstrated"], "offsets": [3]}}, {"event_type": "RWF", "arguments": [{"text": "correctly 
serialize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["correctly", "serialize"], "offsets": [26, 27]}, {"text": "in practice", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "practice"], "offsets": [34, 35]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [24]}}, {"event_type": "PUR", "arguments": [{"text": "tokens in form - like documents", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["tokens", "in", "form", "-", "like", "documents"], "offsets": [28, 29, 30, 31, 32, 33]}], "trigger": {"text": "correctly serialize", "tokens": ["correctly", "serialize"], "offsets": [26, 27]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [44]}, {"text": "mitigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["mitigate"], "offsets": [55]}, {"text": "structure - aware sequence model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["structure", "-", "aware", "sequence", "model"], "offsets": [49, 50, 51, 52, 53]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [45]}}, {"event_type": "PUR", "arguments": [{"text": "suboptimal serialization of forms", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["suboptimal", "serialization", "of", "forms"], "offsets": [57, 58, 59, 60]}], "trigger": {"text": "mitigate", "tokens": ["mitigate"], "offsets": [55]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [64]}, {"text": "rich attention", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["rich", "attention"], "offsets": [66, 67]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [65]}}, {"event_type": "MDS", "arguments": [{"text": "more precise attention score calculation", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["more", "precise", 
"attention", "score", "calculation"], "offsets": [79, 80, 81, 82, 83]}, {"text": "spatial relationship between tokens", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["spatial", "relationship", "between", "tokens"], "offsets": [71, 72, 73, 74]}], "trigger": {"text": "leverages", "tokens": ["leverages"], "offsets": [69]}}, {"event_type": "MDS", "arguments": [{"text": "construct", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["construct"], "offsets": [88]}, {"text": "representations from their neighboring tokens", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["representations", "from", "their", "neighboring", "tokens"], "offsets": [97, 98, 99, 100, 101]}, {"text": "graph convolutions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["graph", "convolutions"], "offsets": [103, 104]}], "trigger": {"text": "embedding", "tokens": ["embedding"], "offsets": [96]}}, {"event_type": "PUR", "arguments": [{"text": "super - tokens", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["super", "-", "tokens"], "offsets": [89, 90, 91]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [88]}}, {"event_type": "CMP", "arguments": [{"text": "formnet", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["formnet"], "offsets": [124]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [125]}, {"text": "existing methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "methods"], "offsets": [126, 127]}, {"text": "more compact model size", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "compact", "model", "size"], "offsets": [130, 131, 132, 133]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [125]}}, {"event_type": "FAC", "arguments": [{"text": "formnet", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["formnet"], "offsets": [124]}, 
{"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [143, 144, 145, 146, 147, 148, 149, 150]}, {"text": "on cord , funsd and payment benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "cord", ",", "funsd", "and", "payment", "benchmarks"], "offsets": [151, 152, 153, 154, 155, 156, 157]}], "trigger": {"text": "establishing", "tokens": ["establishing"], "offsets": [141]}}, {"event_type": "WKS", "arguments": [{"text": "local syntactic information", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["local", "syntactic", "information"], "offsets": [110, 111, 112]}], "trigger": {"text": "explicitly recovers", "tokens": ["explicitly", "recovers"], "offsets": [108, 109]}}], "document": ["sequence", "modeling", "has", "demonstrated", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "natural", "language", "and", "document", "understanding", "tasks", ".", "however", ",", "it", "is", "challenging", "to", "correctly", "serialize", "tokens", "in", "form", "-", "like", "documents", "in", "practice", "due", "to", "their", "variety", "of", "layout", "patterns", ".", "we", "propose", "formnet", ",", "a", "structure", "-", "aware", "sequence", "model", "to", "mitigate", "the", "suboptimal", "serialization", "of", "forms", ".", "first", ",", "we", "design", "rich", "attention", "that", "leverages", "the", "spatial", "relationship", "between", "tokens", "in", "a", "form", "for", "more", "precise", "attention", "score", "calculation", ".", "second", ",", "we", "construct", "super", "-", "tokens", "for", "each", "word", "by", "embedding", "representations", "from", "their", "neighboring", "tokens", "through", "graph", "convolutions", ".", "formnet", "therefore", "explicitly", "recovers", "local", "syntactic", "information", "that", "may", "have", "been", "lost", "during", "serialization", ".", "in", 
"experiments", ",", "formnet", "outperforms", "existing", "methods", "with", "a", "more", "compact", "model", "size", "and", "less", "pre", "-", "training", "data", ",", "establishing", "new", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "cord", ",", "funsd", "and", "payment", "benchmarks", "."]}, {"venue": "ACL", "title": "What Kind of Language Is Hard to Language-Model?", "abstract": "How language-agnostic are current state-of-the-art NLP tools? Are there some types of language that are easier to model with current methods? In prior work (Cotterell et al., 2018) we attempted to address this question for language modeling, and observed that recurrent neural network language models do not perform equally well over all the high-resource European languages found in the Europarl corpus. We speculated that inflectional morphology may be the primary culprit for the discrepancy. In this paper, we extend these earlier experiments to cover 69 languages from 13 language families using a multilingual Bible corpus. Methodologically, we introduce a new paired-sample multiplicative mixed-effects model to obtain language difficulty coefficients from at-least-pairwise parallel corpora. In other words, the model is aware of inter-sentence variation and can handle missing data. Exploiting this model, we show that \u201ctranslationese\u201d is not any easier to model than natively written language in a fair comparison. 
Trying to answer the question of what features difficult languages have in common, we try and fail to reproduce our earlier (Cotterell et al., 2018) observation about morphological complexity and instead reveal far simpler statistics of the data that seem to drive complexity in a much larger sample.", "doc_id": "2995b0b7912aaf0981cb1906c2548c81", "publication_year": 2019, "sentences": ["how language - agnostic are current state - of - the - art nlp tools ?", "are there some types of language that are easier to model with current methods ?", "in prior work ( cotterell et al . , 2018 ) we attempted to address this question for language modeling , and observed that recurrent neural network language models do not perform equally well over all the high - resource european languages found in the europarl corpus .", "we speculated that inflectional morphology may be the primary culprit for the discrepancy .", "in this paper , we extend these earlier experiments to cover 69 languages from 13 language families using a multilingual bible corpus .", "methodologically , we introduce a new paired - sample multiplicative mixed - effects model to obtain language difficulty coefficients from at - least - pairwise parallel corpora .", "in other words , the model is aware of inter - sentence variation and can handle missing data .", "exploiting this model , we show that \u201c", "translationese", "\u201d is not any easier to model than natively written language in a fair comparison .", "trying to answer the question of what features difficult languages have in common , we try and fail to reproduce our earlier ( cotterell et al . 
, 2018 ) observation about morphological complexity and instead reveal far simpler statistics of the data that seem to drive complexity in a much larger sample ."], "events": [{"event_type": "ITT", "arguments": [{"text": "nlp tools", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "tools"], "offsets": [13, 14]}], "trigger": {"text": "agnostic", "tokens": ["agnostic"], "offsets": [3]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [97]}, {"text": "earlier experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["earlier", "experiments"], "offsets": [100, 101]}, {"text": "cover", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["cover"], "offsets": [103]}], "trigger": {"text": "extend", "tokens": ["extend"], "offsets": [98]}}, {"event_type": "PUR", "arguments": [{"text": "69 languages from 13 language families", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["69", "languages", "from", "13", "language", "families"], "offsets": [104, 105, 106, 107, 108, 109]}], "trigger": {"text": "cover", "tokens": ["cover"], "offsets": [103]}}, {"event_type": "WKS", "arguments": [{"text": "multilingual bible corpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["multilingual", "bible", "corpus"], "offsets": [112, 113, 114]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [110]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [118]}, {"text": "paired - sample multiplicative mixed - effects model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["paired", "-", "sample", "multiplicative", "mixed", "-", "effects", "model"], "offsets": [122, 123, 124, 125, 126, 127, 128, 129]}, {"text": "obtain", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["obtain"], "offsets": [131]}], "trigger": {"text": 
"introduce", "tokens": ["introduce"], "offsets": [119]}}, {"event_type": "PUR", "arguments": [{"text": "language difficulty coefficients", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["language", "difficulty", "coefficients"], "offsets": [132, 133, 134]}, {"text": "at - least - pairwise parallel corpora", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["at", "-", "least", "-", "pairwise", "parallel", "corpora"], "offsets": [136, 137, 138, 139, 140, 141, 142]}], "trigger": {"text": "obtain", "tokens": ["obtain"], "offsets": [131]}}, {"event_type": "FAC", "arguments": [{"text": "much larger sample", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["much", "larger", "sample"], "offsets": [238, 239, 240]}, {"text": "far simpler statistics of the data", "nugget_type": "DST", "argument_type": "Object", "tokens": ["far", "simpler", "statistics", "of", "the", "data"], "offsets": [225, 226, 227, 228, 229, 230]}, {"text": "complexity", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["complexity"], "offsets": [235]}], "trigger": {"text": "reveal", "tokens": ["reveal"], "offsets": [224]}}], "document": ["how", "language", "-", "agnostic", "are", "current", "state", "-", "of", "-", "the", "-", "art", "nlp", "tools", "?", "are", "there", "some", "types", "of", "language", "that", "are", "easier", "to", "model", "with", "current", "methods", "?", "in", "prior", "work", "(", "cotterell", "et", "al", ".", ",", "2018", ")", "we", "attempted", "to", "address", "this", "question", "for", "language", "modeling", ",", "and", "observed", "that", "recurrent", "neural", "network", "language", "models", "do", "not", "perform", "equally", "well", "over", "all", "the", "high", "-", "resource", "european", "languages", "found", "in", "the", "europarl", "corpus", ".", "we", "speculated", "that", "inflectional", "morphology", "may", "be", "the", "primary", "culprit", "for", "the", "discrepancy", ".", "in", "this", "paper", ",", "we", "extend", 
"these", "earlier", "experiments", "to", "cover", "69", "languages", "from", "13", "language", "families", "using", "a", "multilingual", "bible", "corpus", ".", "methodologically", ",", "we", "introduce", "a", "new", "paired", "-", "sample", "multiplicative", "mixed", "-", "effects", "model", "to", "obtain", "language", "difficulty", "coefficients", "from", "at", "-", "least", "-", "pairwise", "parallel", "corpora", ".", "in", "other", "words", ",", "the", "model", "is", "aware", "of", "inter", "-", "sentence", "variation", "and", "can", "handle", "missing", "data", ".", "exploiting", "this", "model", ",", "we", "show", "that", "\u201c", "translationese", "\u201d", "is", "not", "any", "easier", "to", "model", "than", "natively", "written", "language", "in", "a", "fair", "comparison", ".", "trying", "to", "answer", "the", "question", "of", "what", "features", "difficult", "languages", "have", "in", "common", ",", "we", "try", "and", "fail", "to", "reproduce", "our", "earlier", "(", "cotterell", "et", "al", ".", ",", "2018", ")", "observation", "about", "morphological", "complexity", "and", "instead", "reveal", "far", "simpler", "statistics", "of", "the", "data", "that", "seem", "to", "drive", "complexity", "in", "a", "much", "larger", "sample", "."]}, {"venue": "ACL", "title": "Spatial Aggregation Facilitates Discovery of Spatial Topics", "abstract": "Spatial aggregation refers to merging of documents created at the same spatial location. We show that by spatial aggregation of a large collection of documents and applying a traditional topic discovery algorithm on the aggregated data we can efficiently discover spatially distinct topics. By looking at topic discovery through matrix factorization lenses we show that spatial aggregation allows low rank approximation of the original document-word matrix, in which spatially distinct topics are preserved and non-spatial topics are aggregated into a single topic. Our experiments on synthetic data confirm this observation. 
Our experiments on 4.7 million tweets collected during the Sandy Hurricane in 2012 show that spatial and temporal aggregation allows rapid discovery of relevant spatial and temporal topics during that period. Our work indicates that different forms of document aggregation might be effective in rapid discovery of various types of distinct topics from large collections of documents.", "doc_id": "9f32fcae854d9166397c97d473e72114", "publication_year": 2019, "sentences": ["spatial aggregation refers to merging of documents created at the same spatial location .", "we show that by spatial aggregation of a large collection of documents and applying a traditional topic discovery algorithm on the aggregated data we can efficiently discover spatially distinct topics .", "by looking at topic discovery through matrix factorization lenses we show that spatial aggregation allows low rank approximation of the original document - word matrix , in which spatially distinct topics are preserved and non - spatial topics are aggregated into a single topic .", "our experiments on synthetic data confirm this observation .", "our experiments on 4 . 
7 million tweets collected during the sandy hurricane in 2012 show that spatial and temporal aggregation allows rapid discovery of relevant spatial and temporal topics during that period .", "our work indicates that different forms of document aggregation might be effective in rapid discovery of various types of distinct topics from large collections of documents ."], "events": [{"event_type": "ITT", "arguments": [{"text": "spatial aggregation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["spatial", "aggregation"], "offsets": [0, 1]}], "trigger": {"text": "refers", "tokens": ["refers"], "offsets": [2]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [14]}, {"text": "discover", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["discover"], "offsets": [40]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [15]}}, {"event_type": "FAC", "arguments": [{"text": "efficiently", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["efficiently"], "offsets": [39]}, {"text": "spatially distinct topics", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["spatially", "distinct", "topics"], "offsets": [41, 42, 43]}, {"text": "by spatial aggregation of a large collection of documents and applying a traditional topic discovery algorithm on the aggregated data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "spatial", "aggregation", "of", "a", "large", "collection", "of", "documents", "and", "applying", "a", "traditional", "topic", "discovery", "algorithm", "on", "the", "aggregated", "data"], "offsets": [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36]}], "trigger": {"text": "discover", "tokens": ["discover"], "offsets": [40]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [54]}, {"text": "allows", 
"nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["allows"], "offsets": [59]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [55]}}, {"event_type": "FAC", "arguments": [{"text": "by looking at topic discovery through matrix factorization lenses", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "looking", "at", "topic", "discovery", "through", "matrix", "factorization", "lenses"], "offsets": [45, 46, 47, 48, 49, 50, 51, 52, 53]}, {"text": "spatial aggregation", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["spatial", "aggregation"], "offsets": [57, 58]}, {"text": "low rank approximation of the original document - word matrix", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["low", "rank", "approximation", "of", "the", "original", "document", "-", "word", "matrix"], "offsets": [60, 61, 62, 63, 64, 65, 66, 67, 68, 69]}], "trigger": {"text": "allows", "tokens": ["allows"], "offsets": [59]}}, {"event_type": "FIN", "arguments": [{"text": "allows", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["allows"], "offsets": [120]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [114]}}, {"event_type": "FAC", "arguments": [{"text": "spatial and temporal aggregation", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["spatial", "and", "temporal", "aggregation"], "offsets": [116, 117, 118, 119]}, {"text": "rapid discovery of relevant spatial and temporal topics", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["rapid", "discovery", "of", "relevant", "spatial", "and", "temporal", "topics"], "offsets": [121, 122, 123, 124, 125, 126, 127, 128]}], "trigger": {"text": "allows", "tokens": ["allows"], "offsets": [120]}}, {"event_type": "FIN", "arguments": [{"text": "effective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effective"], "offsets": [144]}], "trigger": {"text": "indicates", "tokens": ["indicates"], "offsets": [135]}}, {"event_type": 
"FAC", "arguments": [{"text": "different forms of document aggregation", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["different", "forms", "of", "document", "aggregation"], "offsets": [137, 138, 139, 140, 141]}, {"text": "rapid discovery of various types of distinct topics", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["rapid", "discovery", "of", "various", "types", "of", "distinct", "topics"], "offsets": [146, 147, 148, 149, 150, 151, 152, 153]}, {"text": "from large collections of documents", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "large", "collections", "of", "documents"], "offsets": [154, 155, 156, 157, 158]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [144]}}], "document": ["spatial", "aggregation", "refers", "to", "merging", "of", "documents", "created", "at", "the", "same", "spatial", "location", ".", "we", "show", "that", "by", "spatial", "aggregation", "of", "a", "large", "collection", "of", "documents", "and", "applying", "a", "traditional", "topic", "discovery", "algorithm", "on", "the", "aggregated", "data", "we", "can", "efficiently", "discover", "spatially", "distinct", "topics", ".", "by", "looking", "at", "topic", "discovery", "through", "matrix", "factorization", "lenses", "we", "show", "that", "spatial", "aggregation", "allows", "low", "rank", "approximation", "of", "the", "original", "document", "-", "word", "matrix", ",", "in", "which", "spatially", "distinct", "topics", "are", "preserved", "and", "non", "-", "spatial", "topics", "are", "aggregated", "into", "a", "single", "topic", ".", "our", "experiments", "on", "synthetic", "data", "confirm", "this", "observation", ".", "our", "experiments", "on", "4", ".", "7", "million", "tweets", "collected", "during", "the", "sandy", "hurricane", "in", "2012", "show", "that", "spatial", "and", "temporal", "aggregation", "allows", "rapid", "discovery", "of", "relevant", "spatial", "and", "temporal", "topics", 
"during", "that", "period", ".", "our", "work", "indicates", "that", "different", "forms", "of", "document", "aggregation", "might", "be", "effective", "in", "rapid", "discovery", "of", "various", "types", "of", "distinct", "topics", "from", "large", "collections", "of", "documents", "."]}, {"venue": "ACL", "title": "InfoSurgeon: Cross-Media Fine-grained Information Consistency Checking for Fake News Detection", "abstract": "To defend against machine-generated fake news, an effective mechanism is urgently needed. We contribute a novel benchmark for fake news detection at the knowledge element level, as well as a solution for this task which incorporates cross-media consistency checking to detect the fine-grained knowledge elements making news articles misinformative. Due to training data scarcity, we also formulate a novel data synthesis method by manipulating knowledge elements within the knowledge graph to generate noisy training data with specific, hard to detect, known inconsistencies. Our detection approach outperforms the state-of-the-art (up to 16.8% accuracy gain), and more critically, yields fine-grained explanations.", "doc_id": "f60d2567aead1695013244abab4dbc04", "publication_year": 2021, "sentences": ["to defend against machine - generated fake news , an effective mechanism is urgently needed .", "we contribute a novel benchmark for fake news detection at the knowledge element level , as well as a solution for this task which incorporates cross - media consistency checking to detect the fine - grained knowledge elements making news articles misinformative .", "due to training data scarcity , we also formulate a novel data synthesis method by manipulating knowledge elements within the knowledge graph to generate noisy training data with specific , hard to detect , known inconsistencies .", "our detection approach outperforms the state - of - the - art ( up to 16 . 
8 % accuracy gain ) , and more critically , yields fine - grained explanations ."], "events": [{"event_type": "ITT", "arguments": [{"text": "machine - generated fake news", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["machine", "-", "generated", "fake", "news"], "offsets": [3, 4, 5, 6, 7]}], "trigger": {"text": "defend", "tokens": ["defend"], "offsets": [1]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [16]}, {"text": "benchmark", "nugget_type": "APP", "argument_type": "Content", "tokens": ["benchmark"], "offsets": [20]}, {"text": "solution", "nugget_type": "APP", "argument_type": "Content", "tokens": ["solution"], "offsets": [35]}, {"text": "detect", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["detect"], "offsets": [47]}], "trigger": {"text": "contribute", "tokens": ["contribute"], "offsets": [17]}}, {"event_type": "PUR", "arguments": [{"text": "fine - grained knowledge elements", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["fine", "-", "grained", "knowledge", "elements"], "offsets": [49, 50, 51, 52, 53]}], "trigger": {"text": "detect", "tokens": ["detect"], "offsets": [47]}}, {"event_type": "PRP", "arguments": [{"text": "data synthesis method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["data", "synthesis", "method"], "offsets": [70, 71, 72]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [65]}], "trigger": {"text": "formulate", "tokens": ["formulate"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "knowledge elements", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["knowledge", "elements"], "offsets": [75, 76]}, {"text": "knowledge graph", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["knowledge", "graph"], "offsets": [79, 80]}, {"text": "noisy training data", "nugget_type": "DST", "argument_type": 
"TriedComponent", "tokens": ["noisy", "training", "data"], "offsets": [83, 84, 85]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [82]}}, {"event_type": "CMP", "arguments": [{"text": "detection approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["detection", "approach"], "offsets": [97, 98]}, {"text": "state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [101, 102, 103, 104, 105, 106, 107]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [99]}}, {"event_type": "FAC", "arguments": [{"text": "fine - grained explanations", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["fine", "-", "grained", "explanations"], "offsets": [124, 125, 126, 127]}, {"text": "detection approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["detection", "approach"], "offsets": [97, 98]}], "trigger": {"text": "yields", "tokens": ["yields"], "offsets": [123]}}], "document": ["to", "defend", "against", "machine", "-", "generated", "fake", "news", ",", "an", "effective", "mechanism", "is", "urgently", "needed", ".", "we", "contribute", "a", "novel", "benchmark", "for", "fake", "news", "detection", "at", "the", "knowledge", "element", "level", ",", "as", "well", "as", "a", "solution", "for", "this", "task", "which", "incorporates", "cross", "-", "media", "consistency", "checking", "to", "detect", "the", "fine", "-", "grained", "knowledge", "elements", "making", "news", "articles", "misinformative", ".", "due", "to", "training", "data", "scarcity", ",", "we", "also", "formulate", "a", "novel", "data", "synthesis", "method", "by", "manipulating", "knowledge", "elements", "within", "the", "knowledge", "graph", "to", "generate", "noisy", "training", "data", "with", "specific", ",", "hard", "to", "detect", ",", "known", "inconsistencies", ".", "our", "detection", "approach", "outperforms", "the", "state", "-", "of", "-", 
"the", "-", "art", "(", "up", "to", "16", ".", "8", "%", "accuracy", "gain", ")", ",", "and", "more", "critically", ",", "yields", "fine", "-", "grained", "explanations", "."]}, {"venue": "ACL", "title": "Explicit Semantic Decomposition for Definition Generation", "abstract": "Definition generation, which aims to automatically generate dictionary definitions for words, has recently been proposed to assist the construction of dictionaries and help people understand unfamiliar texts. However, previous works hardly consider explicitly modeling the \u201ccomponents\u201d of definitions, leading to under-specific generation results. In this paper, we propose ESD, namely Explicit Semantic Decomposition for definition Generation, which explicitly decomposes the meaning of words into semantic components, and models them with discrete latent variables for definition generation. Experimental results show that achieves top results on WordNet and Oxford benchmarks, outperforming strong previous baselines.", "doc_id": "cb71b02b535016a0b9fe791c9d83105c", "publication_year": 2020, "sentences": ["definition generation , which aims to automatically generate dictionary definitions for words , has recently been proposed to assist the construction of dictionaries and help people understand unfamiliar texts .", "however , previous works hardly consider explicitly modeling the \u201c components \u201d of definitions , leading to under - specific generation results .", "in this paper , we propose esd , namely explicit semantic decomposition for definition generation , which explicitly decomposes the meaning of words into semantic components , and models them with discrete latent variables for definition generation .", "experimental results show that achieves top results on wordnet and oxford benchmarks , outperforming strong previous baselines ."], "events": [{"event_type": "ITT", "arguments": [{"text": "definition generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": 
["definition", "generation"], "offsets": [0, 1]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [16]}}, {"event_type": "RWF", "arguments": [{"text": "previous works", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "works"], "offsets": [32, 33]}, {"text": "hardly consider", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hardly", "consider"], "offsets": [34, 35]}], "trigger": {"text": "hardly consider", "tokens": ["hardly", "consider"], "offsets": [34, 35]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [57]}, {"text": "definition generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["definition", "generation"], "offsets": [66, 67]}, {"text": "explicit semantic decomposition", "nugget_type": "APP", "argument_type": "Content", "tokens": ["explicit", "semantic", "decomposition"], "offsets": [62, 63, 64]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [58]}}, {"event_type": "MDS", "arguments": [{"text": "meaning of words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["meaning", "of", "words"], "offsets": [73, 74, 75]}, {"text": "semantic components", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["semantic", "components"], "offsets": [77, 78]}], "trigger": {"text": "decomposes", "tokens": ["decomposes"], "offsets": [71]}}, {"event_type": "MDS", "arguments": [{"text": "discrete latent variables", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["discrete", "latent", "variables"], "offsets": [84, 85, 86]}, {"text": "definition generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["definition", "generation"], "offsets": [88, 89]}, {"text": "semantic components", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["semantic", "components"], "offsets": [77, 78]}], "trigger": {"text": "models", 
"tokens": ["models"], "offsets": [81]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [95]}, {"text": "outperforming", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforming"], "offsets": [104]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [93]}}, {"event_type": "FAC", "arguments": [{"text": "top results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["top", "results"], "offsets": [96, 97]}, {"text": "on wordnet and oxford benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "wordnet", "and", "oxford", "benchmarks"], "offsets": [98, 99, 100, 101, 102]}, {"text": "explicit semantic decomposition", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["explicit", "semantic", "decomposition"], "offsets": [62, 63, 64]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [95]}}, {"event_type": "CMP", "arguments": [{"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [104]}, {"text": "strong previous baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "previous", "baselines"], "offsets": [105, 106, 107]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [104]}}, {"event_type": "PUR", "arguments": [{"text": "\u201c components \u201d of definitions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["\u201c", "components", "\u201d", "of", "definitions"], "offsets": [39, 40, 41, 42, 43]}], "trigger": {"text": "modeling", "tokens": ["modeling"], "offsets": [37]}}, {"event_type": "RWF", "arguments": [{"text": "under - specific generation results", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["under", "-", "specific", "generation", "results"], "offsets": [47, 48, 49, 50, 51]}], "trigger": {"text": "leading", "tokens": ["leading"], 
"offsets": [45]}}], "document": ["definition", "generation", ",", "which", "aims", "to", "automatically", "generate", "dictionary", "definitions", "for", "words", ",", "has", "recently", "been", "proposed", "to", "assist", "the", "construction", "of", "dictionaries", "and", "help", "people", "understand", "unfamiliar", "texts", ".", "however", ",", "previous", "works", "hardly", "consider", "explicitly", "modeling", "the", "\u201c", "components", "\u201d", "of", "definitions", ",", "leading", "to", "under", "-", "specific", "generation", "results", ".", "in", "this", "paper", ",", "we", "propose", "esd", ",", "namely", "explicit", "semantic", "decomposition", "for", "definition", "generation", ",", "which", "explicitly", "decomposes", "the", "meaning", "of", "words", "into", "semantic", "components", ",", "and", "models", "them", "with", "discrete", "latent", "variables", "for", "definition", "generation", ".", "experimental", "results", "show", "that", "achieves", "top", "results", "on", "wordnet", "and", "oxford", "benchmarks", ",", "outperforming", "strong", "previous", "baselines", "."]}, {"venue": "ACL", "title": "Distilling Translations with Visual Awareness", "abstract": "Previous work on multimodal machine translation has shown that visual information is only needed in very specific cases, for example in the presence of ambiguous words where the textual context is not sufficient. As a consequence, models tend to learn to ignore this information. We propose a translate-and-refine approach to this problem where images are only used by a second stage decoder. This approach is trained jointly to generate a good first draft translation and to improve over this draft by (i) making better use of the target language textual context (both left and right-side contexts) and (ii) making use of visual context. This approach leads to the state of the art results. 
Additionally, we show that it has the ability to recover from erroneous or missing words in the source language.", "doc_id": "44830fa2a35e5491cb2429218c8cede9", "publication_year": 2019, "sentences": ["previous work on multimodal machine translation has shown that visual information is only needed in very specific cases , for example in the presence of ambiguous words where the textual context is not sufficient .", "as a consequence , models tend to learn to ignore this information .", "we propose a translate - and - refine approach to this problem where images are only used by a second stage decoder .", "this approach is trained jointly to generate a good first draft translation and to improve over this draft by ( i ) making better use of the target language textual context ( both left and right - side contexts ) and ( ii ) making use of visual context .", "this approach leads to the state of the art results .", "additionally , we show that it has the ability to recover from erroneous or missing words in the source language ."], "events": [{"event_type": "ITT", "arguments": [{"text": "multimodal machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multimodal", "machine", "translation"], "offsets": [3, 4, 5]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "multimodal machine translation", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["multimodal", "machine", "translation"], "offsets": [3, 4, 5]}, {"text": "visual information", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["visual", "information"], "offsets": [9, 10]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [44]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [48]}, {"text": "translate - and - refine approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["translate", 
"-", "and", "-", "refine", "approach"], "offsets": [51, 52, 53, 54, 55, 56]}, {"text": "this problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["this", "problem"], "offsets": [58, 59]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [49]}}, {"event_type": "MDS", "arguments": [{"text": "translate - and - refine approach", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["translate", "-", "and", "-", "refine", "approach"], "offsets": [51, 52, 53, 54, 55, 56]}, {"text": "good first draft translation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["good", "first", "draft", "translation"], "offsets": [79, 80, 81, 82]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [77]}}, {"event_type": "WKS", "arguments": [{"text": "target language textual context", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["target", "language", "textual", "context"], "offsets": [98, 99, 100, 101]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [85]}], "trigger": {"text": "making better use", "tokens": ["making", "better", "use"], "offsets": [93, 94, 95]}}, {"event_type": "PUR", "arguments": [{"text": "this draft", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["good", "first", "draft", "translation"], "offsets": [79, 80, 81, 82]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [85]}}, {"event_type": "WKS", "arguments": [{"text": "visual context", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["visual", "context"], "offsets": [118, 119]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [85]}], "trigger": {"text": "making use", "tokens": ["making", "use"], "offsets": [115, 116]}}, {"event_type": "FAC", "arguments": [{"text": "translate - and - refine approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": 
["translate", "-", "and", "-", "refine", "approach"], "offsets": [51, 52, 53, 54, 55, 56]}, {"text": "state of the art results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "of", "the", "art", "results"], "offsets": [126, 127, 128, 129, 130]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [123]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [134]}, {"text": "recover", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["recover"], "offsets": [142]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [135]}}, {"event_type": "FAC", "arguments": [{"text": "translate - and - refine approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["translate", "-", "and", "-", "refine", "approach"], "offsets": [51, 52, 53, 54, 55, 56]}, {"text": "in the source language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "source", "language"], "offsets": [148, 149, 150, 151]}, {"text": "erroneous words", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["erroneous", "words"], "offsets": [144, 147]}, {"text": "missing words", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["missing", "words"], "offsets": [146, 147]}], "trigger": {"text": "recover", "tokens": ["recover"], "offsets": [142]}}], "document": ["previous", "work", "on", "multimodal", "machine", "translation", "has", "shown", "that", "visual", "information", "is", "only", "needed", "in", "very", "specific", "cases", ",", "for", "example", "in", "the", "presence", "of", "ambiguous", "words", "where", "the", "textual", "context", "is", "not", "sufficient", ".", "as", "a", "consequence", ",", "models", "tend", "to", "learn", "to", "ignore", "this", "information", ".", "we", "propose", "a", "translate", "-", "and", "-", "refine", "approach", "to", "this", "problem", "where", "images", "are", "only", "used", "by", "a", 
"second", "stage", "decoder", ".", "this", "approach", "is", "trained", "jointly", "to", "generate", "a", "good", "first", "draft", "translation", "and", "to", "improve", "over", "this", "draft", "by", "(", "i", ")", "making", "better", "use", "of", "the", "target", "language", "textual", "context", "(", "both", "left", "and", "right", "-", "side", "contexts", ")", "and", "(", "ii", ")", "making", "use", "of", "visual", "context", ".", "this", "approach", "leads", "to", "the", "state", "of", "the", "art", "results", ".", "additionally", ",", "we", "show", "that", "it", "has", "the", "ability", "to", "recover", "from", "erroneous", "or", "missing", "words", "in", "the", "source", "language", "."]}, {"venue": "ACL", "title": "Exploring Numeracy in Word Embeddings", "abstract": "Word embeddings are now pervasive across NLP subfields as the de-facto method of forming text representataions. In this work, we show that existing embedding models are inadequate at constructing representations that capture salient aspects of mathematical meaning for numbers, which is important for language understanding. Numbers are ubiquitous and frequently appear in text. Inspired by cognitive studies on how humans perceive numbers, we develop an analysis framework to test how well word embeddings capture two essential properties of numbers: magnitude (e.g. 3<4) and numeration (e.g. 3=three). Our experiments reveal that most models capture an approximate notion of magnitude, but are inadequate at capturing numeration. 
We hope that our observations provide a starting point for the development of methods which better capture numeracy in NLP systems.", "doc_id": "587d47553dc9abb9fb06b333f7f5bbfc", "publication_year": 2019, "sentences": ["word embeddings are now pervasive across nlp subfields as the de - facto method of forming text representataions .", "in this work , we show that existing embedding models are inadequate at constructing representations that capture salient aspects of mathematical meaning for numbers , which is important for language understanding .", "numbers are ubiquitous and frequently appear in text .", "inspired by cognitive studies on how humans perceive numbers , we develop an analysis framework to test how well word embeddings capture two essential properties of numbers : magnitude ( e . g . 3 < 4 ) and numeration ( e . g . 3 = three ) .", "our experiments reveal that most models capture an approximate notion of magnitude , but are inadequate at capturing numeration .", "we hope that our observations provide a starting point for the development of methods which better capture numeracy in nlp systems ."], "events": [{"event_type": "ITT", "arguments": [{"text": "word embeddings", "nugget_type": "APP", "argument_type": "Target", "tokens": ["word", "embeddings"], "offsets": [0, 1]}], "trigger": {"text": "pervasive", "tokens": ["pervasive"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "existing embedding models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "embedding", "models"], "offsets": [26, 27, 28]}, {"text": "inadequate", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["inadequate"], "offsets": [30]}], "trigger": {"text": "inadequate", "tokens": ["inadequate"], "offsets": [30]}}, {"event_type": "PUR", "arguments": [{"text": "representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["representations"], "offsets": [33]}], "trigger": {"text": "constructing", "tokens": 
["constructing"], "offsets": [32]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [70]}, {"text": "analysis framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["analysis", "framework"], "offsets": [73, 74]}, {"text": "test", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["test"], "offsets": [76]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [71]}}, {"event_type": "PUR", "arguments": [{"text": "two essential properties of numbers", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["two", "essential", "properties", "of", "numbers"], "offsets": [82, 83, 84, 85, 86]}], "trigger": {"text": "test", "tokens": ["test"], "offsets": [76]}}, {"event_type": "FAC", "arguments": [{"text": "approximate notion of magnitude", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["approximate", "notion", "of", "magnitude"], "offsets": [118, 119, 120, 121]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [116]}}], "document": ["word", "embeddings", "are", "now", "pervasive", "across", "nlp", "subfields", "as", "the", "de", "-", "facto", "method", "of", "forming", "text", "representataions", ".", "in", "this", "work", ",", "we", "show", "that", "existing", "embedding", "models", "are", "inadequate", "at", "constructing", "representations", "that", "capture", "salient", "aspects", "of", "mathematical", "meaning", "for", "numbers", ",", "which", "is", "important", "for", "language", "understanding", ".", "numbers", "are", "ubiquitous", "and", "frequently", "appear", "in", "text", ".", "inspired", "by", "cognitive", "studies", "on", "how", "humans", "perceive", "numbers", ",", "we", "develop", "an", "analysis", "framework", "to", "test", "how", "well", "word", "embeddings", "capture", "two", "essential", "properties", "of", "numbers", ":", "magnitude", "(", "e", ".", "g", ".", "3", "<", "4", ")", "and", 
"numeration", "(", "e", ".", "g", ".", "3", "=", "three", ")", ".", "our", "experiments", "reveal", "that", "most", "models", "capture", "an", "approximate", "notion", "of", "magnitude", ",", "but", "are", "inadequate", "at", "capturing", "numeration", ".", "we", "hope", "that", "our", "observations", "provide", "a", "starting", "point", "for", "the", "development", "of", "methods", "which", "better", "capture", "numeracy", "in", "nlp", "systems", "."]}, {"venue": "ACL", "title": "Letters From the Past: Modeling Historical Sound Change Through Diachronic Character Embeddings", "abstract": "While a great deal of work has been done on NLP approaches to lexical semantic change detection, other aspects of language change have received less attention from the NLP community. In this paper, we address the detection of sound change through historical spelling. We propose that a sound change can be captured by comparing the relative distance through time between the distributions of the characters involved before and after the change has taken place. We model these distributions using PPMI character embeddings. We verify this hypothesis in synthetic data and then test the method\u2019s ability to trace the well-known historical change of lenition of plosives in Danish historical sources. We show that the models are able to identify several of the changes under consideration and to uncover meaningful contexts in which they appeared. 
The methodology has the potential to contribute to the study of open questions such as the relative chronology of sound shifts and their geographical distribution.", "doc_id": "4308e8cab7a5b19acfd8915a300e9e40", "publication_year": 2022, "sentences": ["while a great deal of work has been done on nlp approaches to lexical semantic change detection , other aspects of language change have received less attention from the nlp community .", "in this paper , we address the detection of sound change through historical spelling .", "we propose that a sound change can be captured by comparing the relative distance through time between the distributions of the characters involved before and after the change has taken place .", "we model these distributions using ppmi character embeddings .", "we verify this hypothesis in synthetic data and then test the method \u2019 s ability to trace the well - known historical change of lenition of plosives in danish historical sources .", "we show that the models are able to identify several of the changes under consideration and to uncover meaningful contexts in which they appeared .", "the methodology has the potential to contribute to the study of open questions such as the relative chronology of sound shifts and their geographical distribution ."], "events": [{"event_type": "RWF", "arguments": [{"text": "less attention", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["less", "attention"], "offsets": [25, 26]}, {"text": "other aspects of language change", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["other", "aspects", "of", "language", "change"], "offsets": [18, 19, 20, 21, 22]}], "trigger": {"text": "received", "tokens": ["received"], "offsets": [24]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [36]}, {"text": "historical spelling", "nugget_type": "APP", "argument_type": "Content", "tokens": ["historical", 
"spelling"], "offsets": [44, 45]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [37]}], "trigger": {"text": "through", "tokens": ["through"], "offsets": [43]}}, {"event_type": "PUR", "arguments": [{"text": "detection of sound change", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["detection", "of", "sound", "change"], "offsets": [39, 40, 41, 42]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [37]}}, {"event_type": "MDS", "arguments": [{"text": "ppmi character embeddings", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["ppmi", "character", "embeddings"], "offsets": [84, 85, 86]}, {"text": "model", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["model"], "offsets": [80]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [83]}}, {"event_type": "PUR", "arguments": [{"text": "distributions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["distributions"], "offsets": [82]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [80]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [88]}, {"text": "hypothesis", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["hypothesis"], "offsets": [91]}, {"text": "synthetic data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["synthetic", "data"], "offsets": [93, 94]}], "trigger": {"text": "verify", "tokens": ["verify"], "offsets": [89]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [88]}, {"text": "method \u2019 s ability", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["method", "\u2019", "s", "ability"], "offsets": [99, 100, 101, 102]}, {"text": "trace", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["trace"], "offsets": [104]}], "trigger": {"text": 
"test", "tokens": ["test"], "offsets": [97]}}, {"event_type": "PUR", "arguments": [{"text": "in danish historical sources", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "danish", "historical", "sources"], "offsets": [115, 116, 117, 118]}, {"text": "well - known historical change of lenition of plosives", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["well", "-", "known", "historical", "change", "of", "lenition", "of", "plosives"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113, 114]}], "trigger": {"text": "trace", "tokens": ["trace"], "offsets": [104]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [120]}, {"text": "identify", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["identify"], "offsets": [128]}, {"text": "uncover", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["uncover"], "offsets": [137]}, {"text": "has", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["has"], "offsets": [147]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["models"], "offsets": [124]}, {"text": "changes under consideration", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["changes", "under", "consideration"], "offsets": [132, 133, 134]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [128]}}, {"event_type": "FAC", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["models"], "offsets": [124]}, {"text": "meaningful contexts", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["meaningful", "contexts"], "offsets": [138, 139]}], "trigger": {"text": "uncover", "tokens": ["uncover"], "offsets": [137]}}, {"event_type": "FAC", "arguments": [{"text": "methodology", "nugget_type": "APP", "argument_type": 
"Subject", "tokens": ["methodology"], "offsets": [146]}, {"text": "potential", "nugget_type": "STR", "argument_type": "Object", "tokens": ["potential"], "offsets": [149]}, {"text": "contribute", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["contribute"], "offsets": [151]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [147]}}, {"event_type": "PUR", "arguments": [{"text": "study of open questions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["study", "of", "open", "questions"], "offsets": [154, 155, 156, 157]}], "trigger": {"text": "contribute", "tokens": ["contribute"], "offsets": [151]}}, {"event_type": "MDS", "arguments": [{"text": "captured", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["captured"], "offsets": [55]}, {"text": "relative distance through time", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["relative", "distance", "through", "time"], "offsets": [59, 60, 61, 62]}], "trigger": {"text": "comparing", "tokens": ["comparing"], "offsets": [57]}}, {"event_type": "PUR", "arguments": [{"text": "sound change", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["sound", "change"], "offsets": [51, 52]}], "trigger": {"text": "captured", "tokens": ["captured"], "offsets": [55]}}], "document": ["while", "a", "great", "deal", "of", "work", "has", "been", "done", "on", "nlp", "approaches", "to", "lexical", "semantic", "change", "detection", ",", "other", "aspects", "of", "language", "change", "have", "received", "less", "attention", "from", "the", "nlp", "community", ".", "in", "this", "paper", ",", "we", "address", "the", "detection", "of", "sound", "change", "through", "historical", "spelling", ".", "we", "propose", "that", "a", "sound", "change", "can", "be", "captured", "by", "comparing", "the", "relative", "distance", "through", "time", "between", "the", "distributions", "of", "the", "characters", "involved", "before", "and", "after", "the", "change", "has", "taken", "place", 
".", "we", "model", "these", "distributions", "using", "ppmi", "character", "embeddings", ".", "we", "verify", "this", "hypothesis", "in", "synthetic", "data", "and", "then", "test", "the", "method", "\u2019", "s", "ability", "to", "trace", "the", "well", "-", "known", "historical", "change", "of", "lenition", "of", "plosives", "in", "danish", "historical", "sources", ".", "we", "show", "that", "the", "models", "are", "able", "to", "identify", "several", "of", "the", "changes", "under", "consideration", "and", "to", "uncover", "meaningful", "contexts", "in", "which", "they", "appeared", ".", "the", "methodology", "has", "the", "potential", "to", "contribute", "to", "the", "study", "of", "open", "questions", "such", "as", "the", "relative", "chronology", "of", "sound", "shifts", "and", "their", "geographical", "distribution", "."]}, {"venue": "ACL", "title": "Logic-Guided Data Augmentation and Regularization for Consistent Question Answering", "abstract": "Many natural language questions require qualitative, quantitative or logical comparisons between two entities or events. This paper addresses the problem of improving the accuracy and consistency of responses to comparison questions by integrating logic rules and neural models. Our method leverages logical and linguistic knowledge to augment labeled training data and then uses a consistency-based regularizer to train the model. Improving the global consistency of predictions, our approach achieves large improvements over previous methods in a variety of question answering (QA) tasks, including multiple-choice qualitative reasoning, cause-effect reasoning, and extractive machine reading comprehension. In particular, our method significantly improves the performance of RoBERTa-based models by 1-5% across datasets. We advance state of the art by around 5-8% on WIQA and QuaRel and reduce consistency violations by 58% on HotpotQA. 
We further demonstrate that our approach can learn effectively from limited data.", "doc_id": "811a5e4f0012f9c88a079b33bf80268b", "publication_year": 2020, "sentences": ["many natural language questions require qualitative , quantitative or logical comparisons between two entities or events .", "this paper addresses the problem of improving the accuracy and consistency of responses to comparison questions by integrating logic rules and neural models .", "our method leverages logical and linguistic knowledge to augment labeled training data and then uses a consistency - based regularizer to train the model .", "improving the global consistency of predictions , our approach achieves large improvements over previous methods in a variety of question answering ( qa ) tasks , including multiple - choice qualitative reasoning , cause - effect reasoning , and extractive machine reading comprehension .", "in particular , our method significantly improves the performance of roberta - based models by 1 - 5 % across datasets .", "we advance state of the art by around 5 - 8 % on wiqa and quarel and reduce consistency violations by 58 % on hotpotqa .", "we further demonstrate that our approach can learn effectively from limited data ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language questions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "questions"], "offsets": [1, 2, 3]}], "trigger": {"text": "require", "tokens": ["require"], "offsets": [4]}}, {"event_type": "WKS", "arguments": [{"text": "logic rules", "nugget_type": "APP", "argument_type": "Content", "tokens": ["logic", "rules"], "offsets": [35, 36]}, {"text": "neural models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "models"], "offsets": [38, 39]}, {"text": "improving", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improving"], "offsets": [23]}], "trigger": {"text": "integrating", "tokens": ["integrating"], 
"offsets": [34]}}, {"event_type": "PUR", "arguments": [{"text": "accuracy of responses to comparison questions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["accuracy", "of", "responses", "to", "comparison", "questions"], "offsets": [25, 28, 29, 30, 31, 32]}, {"text": "consistency of responses to comparison questions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["consistency", "of", "responses", "to", "comparison", "questions"], "offsets": [27, 28, 29, 30, 31, 32]}], "trigger": {"text": "improving", "tokens": ["improving"], "offsets": [23]}}, {"event_type": "WKS", "arguments": [{"text": "augment", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["augment"], "offsets": [49]}, {"text": "logical and linguistic knowledge", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["logical", "and", "linguistic", "knowledge"], "offsets": [44, 45, 46, 47]}], "trigger": {"text": "leverages", "tokens": ["leverages"], "offsets": [43]}}, {"event_type": "PUR", "arguments": [{"text": "labeled training data", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["labeled", "training", "data"], "offsets": [50, 51, 52]}], "trigger": {"text": "augment", "tokens": ["augment"], "offsets": [49]}}, {"event_type": "WKS", "arguments": [{"text": "consistency - based regularizer", "nugget_type": "APP", "argument_type": "Content", "tokens": ["consistency", "-", "based", "regularizer"], "offsets": [57, 58, 59, 60]}, {"text": "train", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["train"], "offsets": [62]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [55]}}, {"event_type": "PUR", "arguments": [{"text": "model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["model"], "offsets": [64]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [62]}}, {"event_type": "WKS", "arguments": [{"text": "global consistency of predictions", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["global", 
"consistency", "of", "predictions"], "offsets": [68, 69, 70, 71]}], "trigger": {"text": "improving", "tokens": ["improving"], "offsets": [66]}}, {"event_type": "CMP", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["approach"], "offsets": [74]}, {"text": "previous methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "methods"], "offsets": [79, 80]}, {"text": "large", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["large"], "offsets": [76]}, {"text": "improvements", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["improvements"], "offsets": [77]}, {"text": "in a variety of question answering ( qa ) tasks , including multiple - choice qualitative reasoning , cause - effect reasoning , and extractive machine reading comprehension", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "variety", "of", "question", "answering", "(", "qa", ")", "tasks", ",", "including", "multiple", "-", "choice", "qualitative", "reasoning", ",", "cause", "-", "effect", "reasoning", ",", "and", "extractive", "machine", "reading", "comprehension"], "offsets": [81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [75]}}, {"event_type": "CMP", "arguments": [{"text": "method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["method"], "offsets": [114]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [115]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [116]}, {"text": "performance of roberta - based models", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance", "of", "roberta", "-", "based", "models"], "offsets": [118, 119, 120, 121, 122, 123]}, {"text": "1 - 5 %", "nugget_type": 
"DEG", "argument_type": "Extent", "tokens": ["1", "-", "5", "%"], "offsets": [125, 126, 127, 128]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [116]}}, {"event_type": "CMP", "arguments": [{"text": "method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["method"], "offsets": [114]}, {"text": "advance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["advance"], "offsets": [133]}, {"text": "state of the art", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["state", "of", "the", "art"], "offsets": [134, 135, 136, 137]}, {"text": "around 5 - 8 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["around", "5", "-", "8", "%"], "offsets": [139, 140, 141, 142, 143]}], "trigger": {"text": "advance", "tokens": ["advance"], "offsets": [133]}}, {"event_type": "CMP", "arguments": [{"text": "method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["method"], "offsets": [114]}, {"text": "reduce", "nugget_type": "STR", "argument_type": "Result", "tokens": ["reduce"], "offsets": [149]}, {"text": "consistency violations", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["consistency", "violations"], "offsets": [150, 151]}, {"text": "58 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["58", "%"], "offsets": [153, 154]}], "trigger": {"text": "reduce", "tokens": ["reduce"], "offsets": [149]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [158]}, {"text": "learn", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["learn"], "offsets": [165]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [160]}}, {"event_type": "FAC", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach"], "offsets": [163]}, {"text": "effectively", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["effectively"], "offsets": 
[166]}, {"text": "from limited data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "limited", "data"], "offsets": [167, 168, 169]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [165]}}], "document": ["many", "natural", "language", "questions", "require", "qualitative", ",", "quantitative", "or", "logical", "comparisons", "between", "two", "entities", "or", "events", ".", "this", "paper", "addresses", "the", "problem", "of", "improving", "the", "accuracy", "and", "consistency", "of", "responses", "to", "comparison", "questions", "by", "integrating", "logic", "rules", "and", "neural", "models", ".", "our", "method", "leverages", "logical", "and", "linguistic", "knowledge", "to", "augment", "labeled", "training", "data", "and", "then", "uses", "a", "consistency", "-", "based", "regularizer", "to", "train", "the", "model", ".", "improving", "the", "global", "consistency", "of", "predictions", ",", "our", "approach", "achieves", "large", "improvements", "over", "previous", "methods", "in", "a", "variety", "of", "question", "answering", "(", "qa", ")", "tasks", ",", "including", "multiple", "-", "choice", "qualitative", "reasoning", ",", "cause", "-", "effect", "reasoning", ",", "and", "extractive", "machine", "reading", "comprehension", ".", "in", "particular", ",", "our", "method", "significantly", "improves", "the", "performance", "of", "roberta", "-", "based", "models", "by", "1", "-", "5", "%", "across", "datasets", ".", "we", "advance", "state", "of", "the", "art", "by", "around", "5", "-", "8", "%", "on", "wiqa", "and", "quarel", "and", "reduce", "consistency", "violations", "by", "58", "%", "on", "hotpotqa", ".", "we", "further", "demonstrate", "that", "our", "approach", "can", "learn", "effectively", "from", "limited", "data", "."]}, {"venue": "ACL", "title": "Data Contamination: From Memorization to Exploitation", "abstract": "Pretrained language models are typically trained on massive web-based datasets, which are 
often \u201ccontaminated\u201d with downstream test sets. It is not clear to what extent models exploit the contaminated data for downstream tasks. We present a principled method to study this question. We pretrain BERT models on joint corpora of Wikipedia and labeled downstream datasets, and fine-tune them on the relevant task. Comparing performance between samples seen and unseen during pretraining enables us to define and quantify levels of memorization and exploitation.Experiments with two models and three downstream tasks show that exploitation exists in some cases, but in others the models memorize the contaminated data, but do not exploit it. We show that these two measures are affected by different factors such as the number of duplications of the contaminated data and the model size. Our results highlight the importance of analyzing massive web-scale datasets to verify that progress in NLP is obtained by better language understanding and not better data exploitation.", "doc_id": "45c93ab2e823972b4c8cded90d76c886", "publication_year": 2022, "sentences": ["pretrained language models are typically trained on massive web - based datasets , which are often \u201c contaminated \u201d with downstream test sets .", "it is not clear to what extent models exploit the contaminated data for downstream tasks .", "we present a principled method to study this question .", "we pretrain bert models on joint corpora of wikipedia and labeled downstream datasets , and fine - tune them on the relevant task .", "comparing performance between samples seen and unseen during pretraining enables us to define and quantify levels of memorization and exploitation .", "experiments with two models and three downstream tasks show that exploitation exists in some cases , but in others the models memorize the contaminated data , but do not exploit it .", "we show that these two measures are affected by different factors such as the number of duplications of the contaminated data and the 
model size .", "our results highlight the importance of analyzing massive web - scale datasets to verify that progress in nlp is obtained by better language understanding and not better data exploitation ."], "events": [{"event_type": "RWF", "arguments": [{"text": "pretrained language models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["pretrained", "language", "models"], "offsets": [0, 1, 2]}, {"text": "often", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["often"], "offsets": [15]}, {"text": "contaminated", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["contaminated"], "offsets": [17]}], "trigger": {"text": "contaminated", "tokens": ["contaminated"], "offsets": [17]}}, {"event_type": "ITT", "arguments": [{"text": "pretrained language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pretrained", "language", "models"], "offsets": [0, 1, 2]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [5]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [40]}, {"text": "principled method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["principled", "method"], "offsets": [43, 44]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [41]}}, {"event_type": "WKS", "arguments": [{"text": "performance", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["performance"], "offsets": [75]}, {"text": "define and quantify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["define", "and", "quantify"], "offsets": [86, 87, 88]}, {"text": "between samples seen and unseen during pretraining", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "samples", "seen", "and", "unseen", "during", "pretraining"], "offsets": [76, 77, 78, 79, 80, 81, 82]}], "trigger": {"text": "comparing", "tokens": ["comparing"], "offsets": [74]}}, {"event_type": "PUR", 
"arguments": [{"text": "levels of memorization", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["levels", "of", "memorization"], "offsets": [89, 90, 91]}, {"text": "levels of exploitation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["levels", "of", "exploitation"], "offsets": [89, 90, 93]}], "trigger": {"text": "define and quantify", "tokens": ["define", "and", "quantify"], "offsets": [86, 87, 88]}}, {"event_type": "FAC", "arguments": [{"text": "exploitation", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["exploitation"], "offsets": [105]}, {"text": "in some cases", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "some", "cases"], "offsets": [107, 108, 109]}], "trigger": {"text": "exists", "tokens": ["exists"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["models"], "offsets": [115]}, {"text": "contaminated data", "nugget_type": "DST", "argument_type": "Object", "tokens": ["contaminated", "data"], "offsets": [118, 119]}], "trigger": {"text": "memorize", "tokens": ["memorize"], "offsets": [116]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [127]}, {"text": "affected", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["affected"], "offsets": [134]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [128]}}, {"event_type": "FAC", "arguments": [{"text": "different factors", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["different", "factors"], "offsets": [136, 137]}, {"text": "two measures", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["two", "measures"], "offsets": [131, 132]}], "trigger": {"text": "affected", "tokens": ["affected"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "importance of analyzing massive web - scale datasets", "nugget_type": "TAK", 
"argument_type": "Object", "tokens": ["importance", "of", "analyzing", "massive", "web", "-", "scale", "datasets"], "offsets": [157, 158, 159, 160, 161, 162, 163, 164]}], "trigger": {"text": "highlight", "tokens": ["highlight"], "offsets": [155]}}, {"event_type": "FAC", "arguments": [{"text": "better language understanding", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["better", "language", "understanding"], "offsets": [174, 175, 176]}, {"text": "progress in nlp", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["progress", "in", "nlp"], "offsets": [168, 169, 170]}], "trigger": {"text": "obtained", "tokens": ["obtained"], "offsets": [172]}}, {"event_type": "MDS", "arguments": [{"text": "bert models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["bert", "models"], "offsets": [52, 53]}, {"text": "joint corpora of wikipedia", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["joint", "corpora", "of", "wikipedia"], "offsets": [55, 56, 57, 58]}], "trigger": {"text": "pretrain", "tokens": ["pretrain"], "offsets": [51]}}, {"event_type": "MDS", "arguments": [{"text": "downstream datasets", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["downstream", "datasets"], "offsets": [61, 62]}], "trigger": {"text": "labeled", "tokens": ["labeled"], "offsets": [60]}}, {"event_type": "MDS", "arguments": [{"text": "downstream datasets", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["downstream", "datasets"], "offsets": [61, 62]}, {"text": "on the relevant task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "relevant", "task"], "offsets": [69, 70, 71, 72]}], "trigger": {"text": "fine - tune", "tokens": ["fine", "-", "tune"], "offsets": [65, 66, 67]}}, {"event_type": "FIN", "arguments": [{"text": "exists", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["exists"], "offsets": [106]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": 
[103]}}], "document": ["pretrained", "language", "models", "are", "typically", "trained", "on", "massive", "web", "-", "based", "datasets", ",", "which", "are", "often", "\u201c", "contaminated", "\u201d", "with", "downstream", "test", "sets", ".", "it", "is", "not", "clear", "to", "what", "extent", "models", "exploit", "the", "contaminated", "data", "for", "downstream", "tasks", ".", "we", "present", "a", "principled", "method", "to", "study", "this", "question", ".", "we", "pretrain", "bert", "models", "on", "joint", "corpora", "of", "wikipedia", "and", "labeled", "downstream", "datasets", ",", "and", "fine", "-", "tune", "them", "on", "the", "relevant", "task", ".", "comparing", "performance", "between", "samples", "seen", "and", "unseen", "during", "pretraining", "enables", "us", "to", "define", "and", "quantify", "levels", "of", "memorization", "and", "exploitation", ".", "experiments", "with", "two", "models", "and", "three", "downstream", "tasks", "show", "that", "exploitation", "exists", "in", "some", "cases", ",", "but", "in", "others", "the", "models", "memorize", "the", "contaminated", "data", ",", "but", "do", "not", "exploit", "it", ".", "we", "show", "that", "these", "two", "measures", "are", "affected", "by", "different", "factors", "such", "as", "the", "number", "of", "duplications", "of", "the", "contaminated", "data", "and", "the", "model", "size", ".", "our", "results", "highlight", "the", "importance", "of", "analyzing", "massive", "web", "-", "scale", "datasets", "to", "verify", "that", "progress", "in", "nlp", "is", "obtained", "by", "better", "language", "understanding", "and", "not", "better", "data", "exploitation", "."]}, {"venue": "ACL", "title": "\u201cWho said it, and Why?\u201d Provenance for Natural Language Claims", "abstract": "In an era where generating content and publishing it is so easy, we are bombarded with information and are exposed to all kinds of claims, some of which do not always rank high on the truth scale. 
This paper suggests that the key to a longer-term, holistic, and systematic approach to navigating this information pollution is capturing the provenance of claims. To do that, we develop a formal definition of provenance graph for a given natural language claim, aiming to understand where the claim may come from and how it has evolved. To construct the graph, we model provenance inference, formulated mainly as an information extraction task and addressed via a textual entailment model. We evaluate our approach using two benchmark datasets, showing initial success in capturing the notion of provenance and its effectiveness on the application of claim verification.", "doc_id": "ed8032b7de87ab8c731debdce68da16c", "publication_year": 2020, "sentences": ["in an era where generating content and publishing it is so easy , we are bombarded with information and are exposed to all kinds of claims , some of which do not always rank high on the truth scale .", "this paper suggests that the key to a longer - term , holistic , and systematic approach to navigating this information pollution is capturing the provenance of claims .", "to do that , we develop a formal definition of provenance graph for a given natural language claim , aiming to understand where the claim may come from and how it has evolved .", "to construct the graph , we model provenance inference , formulated mainly as an information extraction task and addressed via a textual entailment model .", "we evaluate our approach using two benchmark datasets , showing initial success in capturing the notion of provenance and its effectiveness on the application of claim verification ."], "events": [{"event_type": "ITT", "arguments": [{"text": "provenance of claims", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["provenance", "of", "claims"], "offsets": [65, 66, 67]}], "trigger": {"text": "capturing", "tokens": ["capturing"], "offsets": [63]}}, {"event_type": "PUR", "arguments": [{"text": "where the claim 
may come from", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["where", "the", "claim", "may", "come", "from"], "offsets": [91, 92, 93, 94, 95, 96]}, {"text": "how it has evolved", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["how", "it", "has", "evolved"], "offsets": [98, 99, 100, 101]}], "trigger": {"text": "understand", "tokens": ["understand"], "offsets": [90]}}, {"event_type": "MDS", "arguments": [{"text": "formal definition of provenance graph", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["formal", "definition", "of", "provenance", "graph"], "offsets": [76, 77, 78, 79, 80]}, {"text": "given natural language claim", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["given", "natural", "language", "claim"], "offsets": [83, 84, 85, 86]}, {"text": "understand", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["understand"], "offsets": [90]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [74]}}, {"event_type": "MDS", "arguments": [{"text": "provenance inference", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["provenance", "inference"], "offsets": [110, 111]}, {"text": "construct", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["construct"], "offsets": [104]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [109]}}, {"event_type": "PUR", "arguments": [{"text": "graph", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["graph"], "offsets": [106]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [104]}}, {"event_type": "MDS", "arguments": [{"text": "textual entailment model", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["textual", "entailment", "model"], "offsets": [124, 125, 126]}, {"text": "information extraction task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["information", "extraction", "task"], "offsets": [117, 118, 119]}], "trigger": {"text": 
"addressed", "tokens": ["addressed"], "offsets": [121]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [128]}, {"text": "two benchmark datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["two", "benchmark", "datasets"], "offsets": [133, 134, 135]}, {"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluate"], "offsets": [129]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [132]}}, {"event_type": "PUR", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["formal", "definition", "of", "provenance", "graph"], "offsets": [76, 77, 78, 79, 80]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [129]}}, {"event_type": "FAC", "arguments": [{"text": "in capturing the notion of provenance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "capturing", "the", "notion", "of", "provenance"], "offsets": [140, 141, 142, 143, 144, 145]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["formal", "definition", "of", "provenance", "graph"], "offsets": [76, 77, 78, 79, 80]}, {"text": "initial success", "nugget_type": "STR", "argument_type": "Object", "tokens": ["initial", "success"], "offsets": [138, 139]}], "trigger": {"text": "showing", "tokens": ["showing"], "offsets": [137]}}, {"event_type": "FAC", "arguments": [{"text": "on the application of claim verification", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "application", "of", "claim", "verification"], "offsets": [149, 150, 151, 152, 153, 154]}, {"text": "effectiveness", "nugget_type": "STR", "argument_type": "Object", "tokens": ["effectiveness"], "offsets": [148]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["formal", "definition", "of", "provenance", "graph"], "offsets": [76, 77, 78, 79, 
80]}], "trigger": {"text": "showing", "tokens": ["showing"], "offsets": [137]}}], "document": ["in", "an", "era", "where", "generating", "content", "and", "publishing", "it", "is", "so", "easy", ",", "we", "are", "bombarded", "with", "information", "and", "are", "exposed", "to", "all", "kinds", "of", "claims", ",", "some", "of", "which", "do", "not", "always", "rank", "high", "on", "the", "truth", "scale", ".", "this", "paper", "suggests", "that", "the", "key", "to", "a", "longer", "-", "term", ",", "holistic", ",", "and", "systematic", "approach", "to", "navigating", "this", "information", "pollution", "is", "capturing", "the", "provenance", "of", "claims", ".", "to", "do", "that", ",", "we", "develop", "a", "formal", "definition", "of", "provenance", "graph", "for", "a", "given", "natural", "language", "claim", ",", "aiming", "to", "understand", "where", "the", "claim", "may", "come", "from", "and", "how", "it", "has", "evolved", ".", "to", "construct", "the", "graph", ",", "we", "model", "provenance", "inference", ",", "formulated", "mainly", "as", "an", "information", "extraction", "task", "and", "addressed", "via", "a", "textual", "entailment", "model", ".", "we", "evaluate", "our", "approach", "using", "two", "benchmark", "datasets", ",", "showing", "initial", "success", "in", "capturing", "the", "notion", "of", "provenance", "and", "its", "effectiveness", "on", "the", "application", "of", "claim", "verification", "."]}, {"venue": "ACL", "title": "Language Embeddings for Typology and Cross-lingual Transfer Learning", "abstract": "Cross-lingual language tasks typically require a substantial amount of annotated data or parallel translation data. We explore whether language representations that capture relationships among languages can be learned and subsequently leveraged in cross-lingual tasks without the use of parallel data. 
We generate dense embeddings for 29 languages using a denoising autoencoder, and evaluate the embeddings using the World Atlas of Language Structures (WALS) and two extrinsic tasks in a zero-shot setting: cross-lingual dependency parsing and cross-lingual natural language inference.", "doc_id": "56db1321e6dbb2e4d5d002c4b0632cc7", "publication_year": 2021, "sentences": ["cross - lingual language tasks typically require a substantial amount of annotated data or parallel translation data .", "we explore whether language representations that capture relationships among languages can be learned and subsequently leveraged in cross - lingual tasks without the use of parallel data .", "we generate dense embeddings for 29 languages using a denoising autoencoder , and evaluate the embeddings using the world atlas of language structures ( wals ) and two extrinsic tasks in a zero - shot setting : cross - lingual dependency parsing and cross - lingual natural language inference ."], "events": [{"event_type": "ITT", "arguments": [{"text": "cross - lingual language tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["cross", "-", "lingual", "language", "tasks"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "require", "tokens": ["require"], "offsets": [6]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [18]}, {"text": "language representations", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["language", "representations"], "offsets": [21, 22]}, {"text": "in cross - lingual tasks without the use of parallel data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "cross", "-", "lingual", "tasks", "without", "the", "use", "of", "parallel", "data"], "offsets": [34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [24]}], "trigger": {"text": 
"leveraged", "tokens": ["leveraged"], "offsets": [33]}}, {"event_type": "PUR", "arguments": [{"text": "relationships among languages", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["relationships", "among", "languages"], "offsets": [25, 26, 27]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [24]}}, {"event_type": "MDS", "arguments": [{"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [47]}, {"text": "29 languages", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["29", "languages"], "offsets": [51, 52]}, {"text": "denoising autoencoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["denoising", "autoencoder"], "offsets": [55, 56]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [53]}}, {"event_type": "WKS", "arguments": [{"text": "world atlas of language structures", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["world", "atlas", "of", "language", "structures"], "offsets": [64, 65, 66, 67, 68]}, {"text": "in a zero - shot setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "zero", "-", "shot", "setting"], "offsets": [76, 77, 78, 79, 80, 81]}, {"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluate"], "offsets": [59]}, {"text": "two extrinsic tasks", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["two", "extrinsic", "tasks"], "offsets": [73, 74, 75]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "embeddings", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["embeddings"], "offsets": [61]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [59]}}, {"event_type": "PUR", "arguments": [{"text": "dense embeddings", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["dense", "embeddings"], "offsets": [48, 49]}], "trigger": {"text": 
"generate", "tokens": ["generate"], "offsets": [47]}}], "document": ["cross", "-", "lingual", "language", "tasks", "typically", "require", "a", "substantial", "amount", "of", "annotated", "data", "or", "parallel", "translation", "data", ".", "we", "explore", "whether", "language", "representations", "that", "capture", "relationships", "among", "languages", "can", "be", "learned", "and", "subsequently", "leveraged", "in", "cross", "-", "lingual", "tasks", "without", "the", "use", "of", "parallel", "data", ".", "we", "generate", "dense", "embeddings", "for", "29", "languages", "using", "a", "denoising", "autoencoder", ",", "and", "evaluate", "the", "embeddings", "using", "the", "world", "atlas", "of", "language", "structures", "(", "wals", ")", "and", "two", "extrinsic", "tasks", "in", "a", "zero", "-", "shot", "setting", ":", "cross", "-", "lingual", "dependency", "parsing", "and", "cross", "-", "lingual", "natural", "language", "inference", "."]}, {"venue": "ACL", "title": "Stolen Probability: A Structural Weakness of Neural Language Models", "abstract": "Neural Network Language Models (NNLMs) generate probability distributions by applying a softmax function to a distance metric formed by taking the dot product of a prediction vector with all word vectors in a high-dimensional embedding space. The dot-product distance metric forms part of the inductive bias of NNLMs. Although NNLMs optimize well with this inductive bias, we show that this results in a sub-optimal ordering of the embedding space that structurally impoverishes some words at the expense of others when assigning probability. 
We present numerical, theoretical and empirical analyses which show that words on the interior of the convex hull in the embedding space have their probability bounded by the probabilities of the words on the hull.", "doc_id": "b377b052545bd9f682a884be00bc7e51", "publication_year": 2020, "sentences": ["neural network language models ( nnlms ) generate probability distributions by applying a softmax function to a distance metric formed by taking the dot product of a prediction vector with all word vectors in a high - dimensional embedding space .", "the dot - product distance metric forms part of the inductive bias of nnlms .", "although nnlms optimize well with this inductive bias , we show that this results in a sub - optimal ordering of the embedding space that structurally impoverishes some words at the expense of others when assigning probability .", "we present numerical , theoretical and empirical analyses which show that words on the interior of the convex hull in the embedding space have their probability bounded by the probabilities of the words on the hull ."], "events": [{"event_type": "RWF", "arguments": [{"text": "sub - optimal ordering of the embedding space", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["sub", "-", "optimal", "ordering", "of", "the", "embedding", "space"], "offsets": [72, 73, 74, 75, 76, 77, 78, 79]}], "trigger": {"text": "results", "tokens": ["results"], "offsets": [69]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [94]}, {"text": "numerical , theoretical and empirical analyses", "nugget_type": "APP", "argument_type": "Content", "tokens": ["numerical", ",", "theoretical", "and", "empirical", "analyses"], "offsets": [96, 97, 98, 99, 100, 101]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [95]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], 
"offsets": [94]}, {"text": "bounded", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["bounded"], "offsets": [120]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [103]}}, {"event_type": "FAC", "arguments": [{"text": "probabilities of the words on the hull", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["probabilities", "of", "the", "words", "on", "the", "hull"], "offsets": [123, 124, 125, 126, 127, 128, 129]}, {"text": "their probability", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["words", "on", "the", "interior", "of", "the", "convex", "hull", "in", "the", "embedding", "space", "probability"], "offsets": [105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 119]}], "trigger": {"text": "bounded", "tokens": ["bounded"], "offsets": [120]}}, {"event_type": "PUR", "arguments": [{"text": "probability distributions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["probability", "distributions"], "offsets": [8, 9]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "structurally impoverishes", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["structurally", "impoverishes"], "offsets": [81, 82]}, {"text": "words", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["words"], "offsets": [84]}], "trigger": {"text": "structurally impoverishes", "tokens": ["structurally", "impoverishes"], "offsets": [81, 82]}}, {"event_type": "RWS", "arguments": [{"text": "neural network language models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["neural", "network", "language", "models"], "offsets": [0, 1, 2, 3]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [7]}, {"text": "softmax function", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["softmax", "function"], "offsets": [13, 14]}, {"text": "distance metric", "nugget_type": 
"APP", "argument_type": "TriedComponent", "tokens": ["distance", "metric"], "offsets": [17, 18]}], "trigger": {"text": "applying", "tokens": ["applying"], "offsets": [11]}}, {"event_type": "RWS", "arguments": [{"text": "dot product of a prediction vector", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["dot", "product", "of", "a", "prediction", "vector"], "offsets": [23, 24, 25, 26, 27, 28]}, {"text": "all word vectors", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["all", "word", "vectors"], "offsets": [30, 31, 32]}, {"text": "in a high - dimensional embedding space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "high", "-", "dimensional", "embedding", "space"], "offsets": [33, 34, 35, 36, 37, 38, 39]}], "trigger": {"text": "taking", "tokens": ["taking"], "offsets": [21]}}], "document": ["neural", "network", "language", "models", "(", "nnlms", ")", "generate", "probability", "distributions", "by", "applying", "a", "softmax", "function", "to", "a", "distance", "metric", "formed", "by", "taking", "the", "dot", "product", "of", "a", "prediction", "vector", "with", "all", "word", "vectors", "in", "a", "high", "-", "dimensional", "embedding", "space", ".", "the", "dot", "-", "product", "distance", "metric", "forms", "part", "of", "the", "inductive", "bias", "of", "nnlms", ".", "although", "nnlms", "optimize", "well", "with", "this", "inductive", "bias", ",", "we", "show", "that", "this", "results", "in", "a", "sub", "-", "optimal", "ordering", "of", "the", "embedding", "space", "that", "structurally", "impoverishes", "some", "words", "at", "the", "expense", "of", "others", "when", "assigning", "probability", ".", "we", "present", "numerical", ",", "theoretical", "and", "empirical", "analyses", "which", "show", "that", "words", "on", "the", "interior", "of", "the", "convex", "hull", "in", "the", "embedding", "space", "have", "their", "probability", "bounded", "by", "the", "probabilities", "of", 
"the", "words", "on", "the", "hull", "."]}, {"venue": "ACL", "title": "FORTAP: Using Formulas for Numerical-Reasoning-Aware Table Pretraining", "abstract": "Tables store rich numerical data, but numerical reasoning over tables is still a challenge. In this paper, we find that the spreadsheet formula, a commonly used language to perform computations on numerical values in spreadsheets, is a valuable supervision for numerical reasoning in tables. Considering large amounts of spreadsheets available on the web, we propose FORTAP, the first exploration to leverage spreadsheet formulas for table pretraining. Two novel self-supervised pretraining objectives are derived from formulas, numerical reference prediction (NRP) and numerical calculation prediction (NCP). While our proposed objectives are generic for encoders, to better capture spreadsheet table layouts and structures, FORTAP is built upon TUTA, the first transformer-based method for spreadsheet table pretraining with tree attention. FORTAP outperforms state-of-the-art methods by large margins on three representative datasets of formula prediction, question answering, and cell type classification, showing the great potential of leveraging formulas for table pretraining.", "doc_id": "449716d2d2d2b70051b4b9778e0dc347", "publication_year": 2022, "sentences": ["tables store rich numerical data , but numerical reasoning over tables is still a challenge .", "in this paper , we find that the spreadsheet formula , a commonly used language to perform computations on numerical values in spreadsheets , is a valuable supervision for numerical reasoning in tables .", "considering large amounts of spreadsheets available on the web , we propose fortap , the first exploration to leverage spreadsheet formulas for table pretraining .", "two novel self - supervised pretraining objectives are derived from formulas , numerical reference prediction ( nrp ) and numerical calculation prediction ( ncp ) .", "while our proposed objectives 
are generic for encoders , to better capture spreadsheet table layouts and structures , fortap is built upon tuta , the first transformer - based method for spreadsheet table pretraining with tree attention .", "fortap outperforms state - of - the - art methods by large margins on three representative datasets of formula prediction , question answering , and cell type classification , showing the great potential of leveraging formulas for table pretraining ."], "events": [{"event_type": "ITT", "arguments": [{"text": "numerical reasoning over tables", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["numerical", "reasoning", "over", "tables"], "offsets": [7, 8, 9, 10]}], "trigger": {"text": "challenge", "tokens": ["challenge"], "offsets": [14]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [60]}, {"text": "fortap", "nugget_type": "APP", "argument_type": "Content", "tokens": ["fortap"], "offsets": [62]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [61]}}, {"event_type": "WKS", "arguments": [{"text": "fortap", "nugget_type": "APP", "argument_type": "Content", "tokens": ["fortap"], "offsets": [119]}, {"text": "upon tuta", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["upon", "tuta"], "offsets": [122, 123]}, {"text": "better capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["better", "capture"], "offsets": [111, 112]}], "trigger": {"text": "built", "tokens": ["built"], "offsets": [121]}}, {"event_type": "PUR", "arguments": [{"text": "spreadsheet table layouts", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["spreadsheet", "table", "layouts"], "offsets": [113, 114, 115]}, {"text": "spreadsheet table structures", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["spreadsheet", "table", "structures"], "offsets": [113, 114, 117]}], "trigger": {"text": "better capture", "tokens": ["better", "capture"], 
"offsets": [111, 112]}}, {"event_type": "CMP", "arguments": [{"text": "fortap", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["fortap"], "offsets": [139]}, {"text": "state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148]}, {"text": "large margins", "nugget_type": "STR", "argument_type": "Result", "tokens": ["large", "margins"], "offsets": [150, 151]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [140]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [140]}}, {"event_type": "FAC", "arguments": [{"text": "fortap", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["fortap"], "offsets": [139]}, {"text": "great potential", "nugget_type": "STR", "argument_type": "Object", "tokens": ["great", "potential"], "offsets": [170, 171]}, {"text": "leveraging", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["leveraging"], "offsets": [173]}], "trigger": {"text": "showing", "tokens": ["showing"], "offsets": [168]}}, {"event_type": "PUR", "arguments": [{"text": "formulas", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["formulas"], "offsets": [174]}, {"text": "for table pretraining", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "table", "pretraining"], "offsets": [175, 176, 177]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [173]}}, {"event_type": "RWF", "arguments": [{"text": "spreadsheet formula", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["spreadsheet", "formula"], "offsets": [24, 25]}, {"text": "valuable supervision", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["valuable", "supervision"], "offsets": [42, 43]}, {"text": "numerical reasoning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["numerical", 
"reasoning"], "offsets": [45, 46]}], "trigger": {"text": "valuable supervision", "tokens": ["valuable", "supervision"], "offsets": [42, 43]}}, {"event_type": "WKS", "arguments": [{"text": "spreadsheet formulas", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["spreadsheet", "formulas"], "offsets": [69, 70]}, {"text": "table pretraining", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["table", "pretraining"], "offsets": [72, 73]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [68]}}, {"event_type": "MDS", "arguments": [{"text": "two novel self - supervised pretraining objectives", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["two", "novel", "self", "-", "supervised", "pretraining", "objectives"], "offsets": [75, 76, 77, 78, 79, 80, 81]}, {"text": "numerical reference prediction", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["numerical", "reference", "prediction"], "offsets": [87, 88, 89]}, {"text": "numerical calculation prediction", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["numerical", "calculation", "prediction"], "offsets": [94, 95, 96]}], "trigger": {"text": "derived", "tokens": ["derived"], "offsets": [83]}}], "document": ["tables", "store", "rich", "numerical", "data", ",", "but", "numerical", "reasoning", "over", "tables", "is", "still", "a", "challenge", ".", "in", "this", "paper", ",", "we", "find", "that", "the", "spreadsheet", "formula", ",", "a", "commonly", "used", "language", "to", "perform", "computations", "on", "numerical", "values", "in", "spreadsheets", ",", "is", "a", "valuable", "supervision", "for", "numerical", "reasoning", "in", "tables", ".", "considering", "large", "amounts", "of", "spreadsheets", "available", "on", "the", "web", ",", "we", "propose", "fortap", ",", "the", "first", "exploration", "to", "leverage", "spreadsheet", "formulas", "for", "table", "pretraining", ".", "two", "novel", "self", "-", "supervised", 
"pretraining", "objectives", "are", "derived", "from", "formulas", ",", "numerical", "reference", "prediction", "(", "nrp", ")", "and", "numerical", "calculation", "prediction", "(", "ncp", ")", ".", "while", "our", "proposed", "objectives", "are", "generic", "for", "encoders", ",", "to", "better", "capture", "spreadsheet", "table", "layouts", "and", "structures", ",", "fortap", "is", "built", "upon", "tuta", ",", "the", "first", "transformer", "-", "based", "method", "for", "spreadsheet", "table", "pretraining", "with", "tree", "attention", ".", "fortap", "outperforms", "state", "-", "of", "-", "the", "-", "art", "methods", "by", "large", "margins", "on", "three", "representative", "datasets", "of", "formula", "prediction", ",", "question", "answering", ",", "and", "cell", "type", "classification", ",", "showing", "the", "great", "potential", "of", "leveraging", "formulas", "for", "table", "pretraining", "."]}, {"venue": "ACL", "title": "Phone Features Improve Speech Translation", "abstract": "End-to-end models for speech translation (ST) more tightly couple speech recognition (ASR) and machine translation (MT) than a traditional cascade of separate ASR and MT models, with simpler model architectures and the potential for reduced error propagation. Their performance is often assumed to be superior, though in many conditions this is not yet the case. We compare cascaded and end-to-end models across high, medium, and low-resource conditions, and show that cascades remain stronger baselines. Further, we introduce two methods to incorporate phone features into ST models. 
We show that these features improve both architectures, closing the gap between end-to-end models and cascades, and outperforming previous academic work \u2013 by up to 9 BLEU on our low-resource setting.", "doc_id": "95e5e05ae239b76501e6f00e503be008", "publication_year": 2020, "sentences": ["end - to - end models for speech translation ( st ) more tightly couple speech recognition ( asr ) and machine translation ( mt ) than a traditional cascade of separate asr and mt models , with simpler model architectures and the potential for reduced error propagation .", "their performance is often assumed to be superior , though in many conditions this is not yet the case .", "we compare cascaded and end - to - end models across high , medium , and low - resource conditions , and show that cascades remain stronger baselines .", "further , we introduce two methods to incorporate phone features into st models .", "we show that these features improve both architectures , closing the gap between end - to - end models and cascades , and outperforming previous academic work \u2013 by up to 9 bleu on our low - resource setting ."], "events": [{"event_type": "ITT", "arguments": [{"text": "end - to - end models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["end", "-", "to", "-", "end", "models"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "tightly couple", "tokens": ["tightly", "couple"], "offsets": [13, 14]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [69]}, {"text": "across high , medium , and low - resource conditions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "high", ",", "medium", ",", "and", "low", "-", "resource", "conditions"], "offsets": [79, 80, 81, 82, 83, 84, 85, 86, 87, 88]}, {"text": "cascaded and end - to - end models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["cascaded", "and", "end", "-", "to", "-", 
"end", "models"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [70]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [69]}, {"text": "remain", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["remain"], "offsets": [94]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [91]}}, {"event_type": "FAC", "arguments": [{"text": "cascades", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["cascades"], "offsets": [93]}, {"text": "stronger baselines", "nugget_type": "APP", "argument_type": "Object", "tokens": ["stronger", "baselines"], "offsets": [95, 96]}], "trigger": {"text": "remain", "tokens": ["remain"], "offsets": [94]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [100]}, {"text": "two methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "methods"], "offsets": [102, 103]}, {"text": "incorporate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["incorporate"], "offsets": [105]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [101]}}, {"event_type": "PUR", "arguments": [{"text": "phone features into st models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["phone", "features", "into", "speech", "translation", "models"], "offsets": [106, 107, 108, 7, 8, 110]}], "trigger": {"text": "incorporate", "tokens": ["incorporate"], "offsets": [105]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [112]}, {"text": "closing", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["closing"], "offsets": [121]}, {"text": "outperforming", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforming"], "offsets": [135]}, {"text": "improve", 
"nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improve"], "offsets": [117]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [113]}}, {"event_type": "FAC", "arguments": [{"text": "two methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["two", "methods"], "offsets": [102, 103]}, {"text": "both architectures", "nugget_type": "APP", "argument_type": "Object", "tokens": ["both", "architectures"], "offsets": [118, 119]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [117]}}, {"event_type": "FAC", "arguments": [{"text": "two methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["two", "methods"], "offsets": [102, 103]}, {"text": "gap", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["gap"], "offsets": [123]}], "trigger": {"text": "closing", "tokens": ["closing"], "offsets": [121]}}, {"event_type": "CMP", "arguments": [{"text": "two methods", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["two", "methods"], "offsets": [102, 103]}, {"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [135]}, {"text": "previous academic work", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "academic", "work"], "offsets": [136, 137, 138]}, {"text": "9", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["9"], "offsets": [143]}, {"text": "bleu", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bleu"], "offsets": [144]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [135]}}], "document": ["end", "-", "to", "-", "end", "models", "for", "speech", "translation", "(", "st", ")", "more", "tightly", "couple", "speech", "recognition", "(", "asr", ")", "and", "machine", "translation", "(", "mt", ")", "than", "a", "traditional", "cascade", "of", "separate", "asr", "and", "mt", "models", ",", "with", "simpler", "model", "architectures", "and", "the", 
"potential", "for", "reduced", "error", "propagation", ".", "their", "performance", "is", "often", "assumed", "to", "be", "superior", ",", "though", "in", "many", "conditions", "this", "is", "not", "yet", "the", "case", ".", "we", "compare", "cascaded", "and", "end", "-", "to", "-", "end", "models", "across", "high", ",", "medium", ",", "and", "low", "-", "resource", "conditions", ",", "and", "show", "that", "cascades", "remain", "stronger", "baselines", ".", "further", ",", "we", "introduce", "two", "methods", "to", "incorporate", "phone", "features", "into", "st", "models", ".", "we", "show", "that", "these", "features", "improve", "both", "architectures", ",", "closing", "the", "gap", "between", "end", "-", "to", "-", "end", "models", "and", "cascades", ",", "and", "outperforming", "previous", "academic", "work", "\u2013", "by", "up", "to", "9", "bleu", "on", "our", "low", "-", "resource", "setting", "."]}, {"venue": "ACL", "title": "Recursive Template-based Frame Generation for Task Oriented Dialog", "abstract": "The Natural Language Understanding (NLU) component in task oriented dialog systems processes a user\u2019s request and converts it into structured information that can be consumed by downstream components such as the Dialog State Tracker (DST). This information is typically represented as a semantic frame that captures the intent and slot-labels provided by the user. We first show that such a shallow representation is insufficient for complex dialog scenarios, because it does not capture the recursive nature inherent in many domains. We propose a recursive, hierarchical frame-based representation and show how to learn it from data. We formulate the frame generation task as a template-based tree decoding task, where the decoder recursively generates a template and then fills slot values into the template. We extend local tree-based loss functions with terms that provide global supervision and show how to optimize them end-to-end. 
We achieve a small improvement on the widely used ATIS dataset and a much larger improvement on a more complex dataset we describe here.", "doc_id": "896eb601d85f56b5a70d426bc55b2b49", "publication_year": 2020, "sentences": ["the natural language understanding ( nlu ) component in task oriented dialog systems processes a user \u2019 s request and converts it into structured information that can be consumed by downstream components such as the dialog state tracker ( dst ) .", "this information is typically represented as a semantic frame that captures the intent and slot - labels provided by the user .", "we first show that such a shallow representation is insufficient for complex dialog scenarios , because it does not capture the recursive nature inherent in many domains .", "we propose a recursive , hierarchical frame - based representation and show how to learn it from data .", "we formulate the frame generation task as a template - based tree decoding task , where the decoder recursively generates a template and then fills slot values into the template .", "we extend local tree - based loss functions with terms that provide global supervision and show how to optimize them end - to - end .", "we achieve a small improvement on the widely used atis dataset and a much larger improvement on a more complex dataset we describe here ."], "events": [{"event_type": "ITT", "arguments": [], "trigger": {"text": "oriented", "tokens": ["oriented"], "offsets": [10]}}, {"event_type": "RWS", "arguments": [{"text": "semantic frame", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["semantic", "frame"], "offsets": [49, 50]}, {"text": "intent and slot - labels", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["intent", "and", "slot", "-", "labels"], "offsets": [54, 55, 56, 57, 58]}], "trigger": {"text": "captures", "tokens": ["captures"], "offsets": [52]}}, {"event_type": "RWF", "arguments": [{"text": "shallow representation", "nugget_type": "FEA", 
"argument_type": "Concern", "tokens": ["shallow", "representation"], "offsets": [70, 71]}, {"text": "complex dialog scenarios", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["complex", "dialog", "scenarios"], "offsets": [75, 76, 77]}, {"text": "insufficient", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["insufficient"], "offsets": [73]}], "trigger": {"text": "insufficient", "tokens": ["insufficient"], "offsets": [73]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [92]}, {"text": "recursive , hierarchical frame - based representation", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["recursive", ",", "hierarchical", "frame", "-", "based", "representation"], "offsets": [95, 96, 97, 98, 99, 100, 101]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [93]}}, {"event_type": "MDS", "arguments": [{"text": "decoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["decoder"], "offsets": [128]}, {"text": "template", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["template"], "offsets": [132]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [130]}}, {"event_type": "MDS", "arguments": [{"text": "slot values", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["slot", "values"], "offsets": [136, 137]}, {"text": "decoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["decoder"], "offsets": [128]}], "trigger": {"text": "fills", "tokens": ["fills"], "offsets": [135]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [142]}, {"text": "local tree - based loss functions", "nugget_type": "APP", "argument_type": "Content", "tokens": ["local", "tree", "-", "based", "loss", "functions"], "offsets": [144, 145, 146, 147, 148, 149]}], "trigger": {"text": "extend", 
"tokens": ["extend"], "offsets": [143]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [142]}, {"text": "how to optimize them end - to - end", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["how", "to", "optimize", "them", "end", "-", "to", "-", "end"], "offsets": [158, 159, 160, 161, 162, 163, 164, 165, 166]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [157]}}, {"event_type": "FAC", "arguments": [{"text": "much larger", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["much", "larger"], "offsets": [181, 182]}, {"text": "improvement", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["improvement"], "offsets": [183]}, {"text": "more complex dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["more", "complex", "dataset"], "offsets": [186, 187, 188]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [169]}}, {"event_type": "FAC", "arguments": [{"text": "small", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["small"], "offsets": [171]}, {"text": "improvement", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["improvement"], "offsets": [172]}, {"text": "widely used atis dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["widely", "used", "atis", "dataset"], "offsets": [175, 176, 177, 178]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [169]}}], "document": ["the", "natural", "language", "understanding", "(", "nlu", ")", "component", "in", "task", "oriented", "dialog", "systems", "processes", "a", "user", "\u2019", "s", "request", "and", "converts", "it", "into", "structured", "information", "that", "can", "be", "consumed", "by", "downstream", "components", "such", "as", "the", "dialog", "state", "tracker", "(", "dst", ")", ".", "this", "information", "is", "typically", "represented", "as", "a", "semantic", "frame", "that", 
"captures", "the", "intent", "and", "slot", "-", "labels", "provided", "by", "the", "user", ".", "we", "first", "show", "that", "such", "a", "shallow", "representation", "is", "insufficient", "for", "complex", "dialog", "scenarios", ",", "because", "it", "does", "not", "capture", "the", "recursive", "nature", "inherent", "in", "many", "domains", ".", "we", "propose", "a", "recursive", ",", "hierarchical", "frame", "-", "based", "representation", "and", "show", "how", "to", "learn", "it", "from", "data", ".", "we", "formulate", "the", "frame", "generation", "task", "as", "a", "template", "-", "based", "tree", "decoding", "task", ",", "where", "the", "decoder", "recursively", "generates", "a", "template", "and", "then", "fills", "slot", "values", "into", "the", "template", ".", "we", "extend", "local", "tree", "-", "based", "loss", "functions", "with", "terms", "that", "provide", "global", "supervision", "and", "show", "how", "to", "optimize", "them", "end", "-", "to", "-", "end", ".", "we", "achieve", "a", "small", "improvement", "on", "the", "widely", "used", "atis", "dataset", "and", "a", "much", "larger", "improvement", "on", "a", "more", "complex", "dataset", "we", "describe", "here", "."]}, {"venue": "ACL", "title": "Soft Contextual Data Augmentation for Neural Machine Translation", "abstract": "While data augmentation is an important trick to boost the accuracy of deep learning methods in computer vision tasks, its study in natural language tasks is still very limited. In this paper, we present a novel data augmentation method for neural machine translation.Different from previous augmentation methods that randomly drop, swap or replace words with other words in a sentence, we softly augment a randomly chosen word in a sentence by its contextual mixture of multiple related words. 
More accurately, we replace the one-hot representation of a word by a distribution (provided by a language model) over the vocabulary, i.e., replacing the embedding of this word by a weighted combination of multiple semantically similar words. Since the weights of those words depend on the contextual information of the word to be replaced,the newly generated sentences capture much richer information than previous augmentation methods. Experimental results on both small scale and large scale machine translation data sets demonstrate the superiority of our method over strong baselines.", "doc_id": "e59d60435abe6e47ac89acdf62779e9c", "publication_year": 2019, "sentences": ["while data augmentation is an important trick to boost the accuracy of deep learning methods in computer vision tasks , its study in natural language tasks is still very limited .", "in this paper , we present a novel data augmentation method for neural machine translation .", "different from previous augmentation methods that randomly drop , swap or replace words with other words in a sentence , we softly augment a randomly chosen word in a sentence by its contextual mixture of multiple related words .", "more accurately , we replace the one - hot representation of a word by a distribution ( provided by a language model ) over the vocabulary , i . e . 
, replacing the embedding of this word by a weighted combination of multiple semantically similar words .", "since the weights of those words depend on the contextual information of the word to be replaced , the newly generated sentences capture much richer information than previous augmentation methods .", "experimental results on both small scale and large scale machine translation data sets demonstrate the superiority of our method over strong baselines ."], "events": [{"event_type": "ITT", "arguments": [{"text": "computer vision tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["computer", "vision", "tasks"], "offsets": [16, 17, 18]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "data augmentation", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["data", "augmentation"], "offsets": [1, 2]}, {"text": "very limited", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["very", "limited"], "offsets": [28, 29]}], "trigger": {"text": "very limited", "tokens": ["very", "limited"], "offsets": [28, 29]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [35]}, {"text": "data augmentation method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["data", "augmentation", "method"], "offsets": [39, 40, 41]}, {"text": "neural machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["neural", "machine", "translation"], "offsets": [43, 44, 45]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [36]}}, {"event_type": "MDS", "arguments": [{"text": "randomly chosen word", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["randomly", "chosen", "word"], "offsets": [71, 72, 73]}, {"text": "its contextual mixture of multiple related words", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["its", "contextual", 
"mixture", "of", "multiple", "related", "words"], "offsets": [78, 79, 80, 81, 82, 83, 84]}], "trigger": {"text": "softly augment", "tokens": ["softly", "augment"], "offsets": [68, 69]}}, {"event_type": "MDS", "arguments": [{"text": "one - hot representation of a word", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["one", "-", "hot", "representation", "of", "a", "word"], "offsets": [92, 93, 94, 95, 96, 97, 98]}, {"text": "distribution ( provided by a language model ) over the vocabulary", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["distribution", "over", "the", "vocabulary"], "offsets": [101, 109, 110, 111]}], "trigger": {"text": "replace", "tokens": ["replace"], "offsets": [90]}}, {"event_type": "FIN", "arguments": [{"text": "superiority", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["superiority"], "offsets": [180]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [178]}}, {"event_type": "CMP", "arguments": [{"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [185, 186]}, {"text": "data augmentation method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["data", "augmentation", "method"], "offsets": [39, 40, 41]}, {"text": "our method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["data", "augmentation", "method"], "offsets": [39, 40, 41]}, {"text": "superiority", "nugget_type": "STR", "argument_type": "Result", "tokens": ["superiority"], "offsets": [180]}, {"text": "small scale machine translation data sets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["small", "scale", "machine", "translation", "data", "sets"], "offsets": [169, 170, 174, 175, 176, 177]}], "trigger": {"text": "superiority", "tokens": ["superiority"], "offsets": [180]}}, {"event_type": "RWS", "arguments": [{"text": "words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["words"], 
"offsets": [59]}, {"text": "other words", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["other", "words"], "offsets": [61, 62]}, {"text": "in a sentence", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "sentence"], "offsets": [63, 64, 65]}, {"text": "previous augmentation methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "augmentation", "methods"], "offsets": [49, 50, 51]}], "trigger": {"text": "randomly drop , swap or replace", "tokens": ["randomly", "drop", ",", "swap", "or", "replace"], "offsets": [53, 54, 55, 56, 57, 58]}}, {"event_type": "FAC", "arguments": [{"text": "newly generated sentences", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["newly", "generated", "sentences"], "offsets": [153, 154, 155]}, {"text": "much richer information", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["much", "richer", "information"], "offsets": [157, 158, 159]}, {"text": "since the weights of those words depend on the contextual information of the word to be replaced", "nugget_type": "LIM", "argument_type": "Reason", "tokens": ["since", "the", "weights", "of", "those", "words", "depend", "on", "the", "contextual", "information", "of", "the", "word", "to", "be", "replaced"], "offsets": [134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [156]}}], "document": ["while", "data", "augmentation", "is", "an", "important", "trick", "to", "boost", "the", "accuracy", "of", "deep", "learning", "methods", "in", "computer", "vision", "tasks", ",", "its", "study", "in", "natural", "language", "tasks", "is", "still", "very", "limited", ".", "in", "this", "paper", ",", "we", "present", "a", "novel", "data", "augmentation", "method", "for", "neural", "machine", "translation", ".", "different", "from", "previous", "augmentation", "methods", "that", "randomly", "drop", ",", "swap", 
"or", "replace", "words", "with", "other", "words", "in", "a", "sentence", ",", "we", "softly", "augment", "a", "randomly", "chosen", "word", "in", "a", "sentence", "by", "its", "contextual", "mixture", "of", "multiple", "related", "words", ".", "more", "accurately", ",", "we", "replace", "the", "one", "-", "hot", "representation", "of", "a", "word", "by", "a", "distribution", "(", "provided", "by", "a", "language", "model", ")", "over", "the", "vocabulary", ",", "i", ".", "e", ".", ",", "replacing", "the", "embedding", "of", "this", "word", "by", "a", "weighted", "combination", "of", "multiple", "semantically", "similar", "words", ".", "since", "the", "weights", "of", "those", "words", "depend", "on", "the", "contextual", "information", "of", "the", "word", "to", "be", "replaced", ",", "the", "newly", "generated", "sentences", "capture", "much", "richer", "information", "than", "previous", "augmentation", "methods", ".", "experimental", "results", "on", "both", "small", "scale", "and", "large", "scale", "machine", "translation", "data", "sets", "demonstrate", "the", "superiority", "of", "our", "method", "over", "strong", "baselines", "."]}, {"venue": "ACL", "title": "RotateQVS: Representing Temporal Information as Rotations in Quaternion Vector Space for Temporal Knowledge Graph Completion", "abstract": "Temporal factors are tied to the growth of facts in realistic applications, such as the progress of diseases and the development of political situation, therefore, research on Temporal Knowledge Graph (TKG) attracks much attention. In TKG, relation patterns inherent with temporality are required to be studied for representation learning and reasoning across temporal facts. However, existing methods can hardly model temporal relation patterns, nor can capture the intrinsic connections between relations when evolving over time, lacking of interpretability. 
In this paper, we propose a novel temporal modeling method which represents temporal entities as Rotations in Quaternion Vector Space (RotateQVS) and relations as complex vectors in Hamilton\u2019s quaternion space. We demonstrate our method can model key patterns of relations in TKG, such as symmetry, asymmetry, inverse, and can capture time-evolved relations by theory. And empirically, we show that our method can boost the performance of link prediction tasks over four temporal knowledge graph benchmarks.", "doc_id": "812ef8d8098f5a9782e737c5fae9b104", "publication_year": 2022, "sentences": ["temporal factors are tied to the growth of facts in realistic applications , such as the progress of diseases and the development of political situation , therefore , research on temporal knowledge graph ( tkg ) attracks much attention .", "in tkg , relation patterns inherent with temporality are required to be studied for representation learning and reasoning across temporal facts .", "however , existing methods can hardly model temporal relation patterns , nor can capture the intrinsic connections between relations when evolving over time , lacking of interpretability .", "in this paper , we propose a novel temporal modeling method which represents temporal entities as rotations in quaternion vector space ( rotateqvs ) and relations as complex vectors in hamilton \u2019 s quaternion space .", "we demonstrate our method can model key patterns of relations in tkg , such as symmetry , asymmetry , inverse , and can capture time - evolved relations by theory .", "and empirically , we show that our method can boost the performance of link prediction tasks over four temporal knowledge graph benchmarks ."], "events": [{"event_type": "ITT", "arguments": [{"text": "temporal knowledge graph", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["temporal", "knowledge", "graph"], "offsets": [30, 31, 32]}], "trigger": {"text": "attracks", "tokens": ["attracks"], "offsets": 
[36]}}, {"event_type": "RWS", "arguments": [{"text": "inherent with temporality", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["inherent", "with", "temporality"], "offsets": [45, 46, 47]}, {"text": "relation patterns", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relation", "patterns"], "offsets": [43, 44]}, {"text": "across temporal facts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "temporal", "facts"], "offsets": [58, 59, 60]}, {"text": "representation learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["representation", "learning"], "offsets": [54, 55]}, {"text": "representation reasoning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["representation", "reasoning"], "offsets": [54, 57]}], "trigger": {"text": "studied", "tokens": ["studied"], "offsets": [52]}}, {"event_type": "RWF", "arguments": [{"text": "hardly model", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hardly", "model"], "offsets": [67, 68]}, {"text": "existing methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "methods"], "offsets": [64, 65]}], "trigger": {"text": "hardly model", "tokens": ["hardly", "model"], "offsets": [67, 68]}}, {"event_type": "RWF", "arguments": [{"text": "nor can capture", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["nor", "can", "capture"], "offsets": [73, 74, 75]}, {"text": "intrinsic connections", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["intrinsic", "connections"], "offsets": [77, 78]}, {"text": "between relations when evolving over time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "relations", "when", "evolving", "over", "time"], "offsets": [79, 80, 81, 82, 83, 84]}], "trigger": {"text": "nor can capture", "tokens": ["nor", "can", "capture"], "offsets": [73, 74, 75]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": 
"Proposer", "tokens": ["we"], "offsets": [94]}, {"text": "temporal modeling method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["temporal", "modeling", "method"], "offsets": [98, 99, 100]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [95]}}, {"event_type": "MDS", "arguments": [{"text": "temporal entities", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["temporal", "entities"], "offsets": [103, 104]}, {"text": "in quaternion vector space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "quaternion", "vector", "space"], "offsets": [107, 108, 109, 110]}, {"text": "relations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relations"], "offsets": [115]}, {"text": "in hamilton \u2019 s quaternion space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "hamilton", "\u2019", "s", "quaternion", "space"], "offsets": [119, 120, 121, 122, 123, 124]}, {"text": "complex vectors", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["complex", "vectors"], "offsets": [117, 118]}, {"text": "rotations", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["rotations"], "offsets": [106]}], "trigger": {"text": "represents", "tokens": ["represents"], "offsets": [102]}}, {"event_type": "FAC", "arguments": [{"text": "key patterns of relations", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["key", "patterns", "of", "relations"], "offsets": [132, 133, 134, 135]}, {"text": "temporal modeling method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["temporal", "modeling", "method"], "offsets": [98, 99, 100]}, {"text": "temporal knowledge graph", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["temporal", "knowledge", "graph"], "offsets": [30, 31, 32]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [131]}}, {"event_type": "FAC", "arguments": [{"text": "temporal modeling method", "nugget_type": "APP", 
"argument_type": "Subject", "tokens": ["temporal", "modeling", "method"], "offsets": [98, 99, 100]}, {"text": "time - evolved relations", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["time", "-", "evolved", "relations"], "offsets": [150, 151, 152, 153]}, {"text": "by theory", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "theory"], "offsets": [154, 155]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [149]}}, {"event_type": "FIN", "arguments": [{"text": "boost", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["boost"], "offsets": [166]}, {"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [160]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [161]}}, {"event_type": "FAC", "arguments": [{"text": "temporal modeling method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["temporal", "modeling", "method"], "offsets": [98, 99, 100]}, {"text": "performance of link prediction tasks", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "of", "link", "prediction", "tasks"], "offsets": [168, 169, 170, 171, 172]}, {"text": "four temporal knowledge graph benchmarks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["four", "temporal", "knowledge", "graph", "benchmarks"], "offsets": [174, 175, 176, 177, 178]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [166]}}], "document": ["temporal", "factors", "are", "tied", "to", "the", "growth", "of", "facts", "in", "realistic", "applications", ",", "such", "as", "the", "progress", "of", "diseases", "and", "the", "development", "of", "political", "situation", ",", "therefore", ",", "research", "on", "temporal", "knowledge", "graph", "(", "tkg", ")", "attracks", "much", "attention", ".", "in", "tkg", ",", "relation", "patterns", "inherent", "with", "temporality", "are", "required", "to", "be", "studied", "for", "representation", "learning", "and", 
"reasoning", "across", "temporal", "facts", ".", "however", ",", "existing", "methods", "can", "hardly", "model", "temporal", "relation", "patterns", ",", "nor", "can", "capture", "the", "intrinsic", "connections", "between", "relations", "when", "evolving", "over", "time", ",", "lacking", "of", "interpretability", ".", "in", "this", "paper", ",", "we", "propose", "a", "novel", "temporal", "modeling", "method", "which", "represents", "temporal", "entities", "as", "rotations", "in", "quaternion", "vector", "space", "(", "rotateqvs", ")", "and", "relations", "as", "complex", "vectors", "in", "hamilton", "\u2019", "s", "quaternion", "space", ".", "we", "demonstrate", "our", "method", "can", "model", "key", "patterns", "of", "relations", "in", "tkg", ",", "such", "as", "symmetry", ",", "asymmetry", ",", "inverse", ",", "and", "can", "capture", "time", "-", "evolved", "relations", "by", "theory", ".", "and", "empirically", ",", "we", "show", "that", "our", "method", "can", "boost", "the", "performance", "of", "link", "prediction", "tasks", "over", "four", "temporal", "knowledge", "graph", "benchmarks", "."]}, {"venue": "ACL", "title": "Pre-train and Plug-in: Flexible Conditional Text Generation with Variational Auto-Encoders", "abstract": "Conditional Text Generation has drawn much attention as a topic of Natural Language Generation (NLG) which provides the possibility for humans to control the properties of generated contents. Current conditional generation models cannot handle emerging conditions due to their joint end-to-end learning fashion. When a new condition added, these techniques require full retraining. In this paper, we present a new framework named Pre-train and Plug-in Variational Auto-Encoder (PPVAE) towards flexible conditional text generation. PPVAE decouples the text generation module from the condition representation module to allow \u201cone-to-many\u201d conditional generation. 
When a fresh condition emerges, only a lightweight network needs to be trained and works as a plug-in for PPVAE, which is efficient and desirable for real-world applications. Extensive experiments demonstrate the superiority of PPVAE against the existing alternatives with better conditionality and diversity but less training effort.", "doc_id": "3fa58fd0fa44fe3c19d584bdae371ae7", "publication_year": 2020, "sentences": ["conditional text generation has drawn much attention as a topic of natural language generation ( nlg ) which provides the possibility for humans to control the properties of generated contents .", "current conditional generation models cannot handle emerging conditions due to their joint end - to - end learning fashion .", "when a new condition added , these techniques require full retraining .", "in this paper , we present a new framework named pre - train and plug - in variational auto - encoder ( ppvae ) towards flexible conditional text generation .", "ppvae decouples the text generation module from the condition representation module to allow \u201c one - to - many \u201d conditional generation .", "when a fresh condition emerges , only a lightweight network needs to be trained and works as a plug - in for ppvae , which is efficient and desirable for real - world applications .", "extensive experiments demonstrate the superiority of ppvae against the existing alternatives with better conditionality and diversity but less training effort ."], "events": [{"event_type": "ITT", "arguments": [{"text": "conditional text generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["conditional", "text", "generation"], "offsets": [0, 1, 2]}], "trigger": {"text": "drawn", "tokens": ["drawn"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "current conditional generation models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["current", "conditional", "generation", "models"], "offsets": [31, 32, 33, 34]}, 
{"text": "cannot handle", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cannot", "handle"], "offsets": [35, 36]}], "trigger": {"text": "cannot handle", "tokens": ["cannot", "handle"], "offsets": [35, 36]}}, {"event_type": "PRP", "arguments": [{"text": "flexible conditional text generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["flexible", "conditional", "text", "generation"], "offsets": [88, 89, 90, 91]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [68]}}, {"event_type": "MDS", "arguments": [{"text": "condition representation module", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["condition", "representation", "module"], "offsets": [101, 102, 103]}, {"text": "text generation module", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["text", "generation", "module"], "offsets": [96, 97, 98]}, {"text": "allow", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["allow"], "offsets": [105]}], "trigger": {"text": "decouples", "tokens": ["decouples"], "offsets": [94]}}, {"event_type": "PUR", "arguments": [{"text": "\u201c one - to - many \u201d conditional generation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["\u201c", "one", "-", "to", "-", "many", "\u201d", "conditional", "generation"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113, 114]}], "trigger": {"text": "allow", "tokens": ["allow"], "offsets": [105]}}, {"event_type": "MDS", "arguments": [{"text": "lightweight network", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["lightweight", "network"], "offsets": [124, 125]}, {"text": "when a fresh condition emerges", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "a", "fresh", "condition", "emerges"], "offsets": [116, 117, 118, 119, 120]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [129]}}, {"event_type": "MDS", "arguments": [{"text": "lightweight network", "nugget_type": "APP", 
"argument_type": "BaseComponent", "tokens": ["lightweight", "network"], "offsets": [124, 125]}, {"text": "plug - in for ppvae", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["plug", "-", "in", "for", "pre", "-", "train", "and", "plug", "-", "in", "variational", "auto", "-", "encoder"], "offsets": [134, 135, 136, 137, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83]}], "trigger": {"text": "works", "tokens": ["works"], "offsets": [131]}}, {"event_type": "FIN", "arguments": [{"text": "superiority", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["superiority"], "offsets": [155]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [153]}}, {"event_type": "CMP", "arguments": [{"text": "existing alternatives", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["existing", "alternatives"], "offsets": [160, 161]}, {"text": "pre - train and plug - in variational auto - encoder", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["pre", "-", "train", "and", "plug", "-", "in", "variational", "auto", "-", "encoder"], "offsets": [73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83]}, {"text": "less training effort", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["less", "training", "effort"], "offsets": [168, 169, 170]}, {"text": "superiority", "nugget_type": "STR", "argument_type": "Result", "tokens": ["superiority"], "offsets": [155]}, {"text": "better conditionality", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "conditionality"], "offsets": [163, 164]}], "trigger": {"text": "superiority", "tokens": ["superiority"], "offsets": [155]}}], "document": ["conditional", "text", "generation", "has", "drawn", "much", "attention", "as", "a", "topic", "of", "natural", "language", "generation", "(", "nlg", ")", "which", "provides", "the", "possibility", "for", "humans", "to", "control", "the", "properties", "of", "generated", "contents", ".", "current", "conditional", "generation", "models", 
"cannot", "handle", "emerging", "conditions", "due", "to", "their", "joint", "end", "-", "to", "-", "end", "learning", "fashion", ".", "when", "a", "new", "condition", "added", ",", "these", "techniques", "require", "full", "retraining", ".", "in", "this", "paper", ",", "we", "present", "a", "new", "framework", "named", "pre", "-", "train", "and", "plug", "-", "in", "variational", "auto", "-", "encoder", "(", "ppvae", ")", "towards", "flexible", "conditional", "text", "generation", ".", "ppvae", "decouples", "the", "text", "generation", "module", "from", "the", "condition", "representation", "module", "to", "allow", "\u201c", "one", "-", "to", "-", "many", "\u201d", "conditional", "generation", ".", "when", "a", "fresh", "condition", "emerges", ",", "only", "a", "lightweight", "network", "needs", "to", "be", "trained", "and", "works", "as", "a", "plug", "-", "in", "for", "ppvae", ",", "which", "is", "efficient", "and", "desirable", "for", "real", "-", "world", "applications", ".", "extensive", "experiments", "demonstrate", "the", "superiority", "of", "ppvae", "against", "the", "existing", "alternatives", "with", "better", "conditionality", "and", "diversity", "but", "less", "training", "effort", "."]}, {"venue": "ACL", "title": "Do Context-Aware Translation Models Pay the Right Attention?", "abstract": "Context-aware machine translation models are designed to leverage contextual information, but often fail to do so. As a result, they inaccurately disambiguate pronouns and polysemous words that require context for resolution. In this paper, we ask several questions: What contexts do human translators use to resolve ambiguous words? Are models paying large amounts of attention to the same context? What if we explicitly train them to do so? 
To answer these questions, we introduce SCAT (Supporting Context for Ambiguous Translations), a new English-French dataset comprising supporting context words for 14K translations that professional translators found useful for pronoun disambiguation. Using SCAT, we perform an in-depth analysis of the context used to disambiguate, examining positional and lexical characteristics of the supporting words. Furthermore, we measure the degree of alignment between the model\u2019s attention scores and the supporting context from SCAT, and apply a guided attention strategy to encourage agreement between the two.", "doc_id": "55756c0e190c15a9145fa6953eb2eb79", "publication_year": 2021, "sentences": ["context - aware machine translation models are designed to leverage contextual information , but often fail to do so .", "as a result , they inaccurately disambiguate pronouns and polysemous words that require context for resolution .", "in this paper , we ask several questions : what contexts do human translators use to resolve ambiguous words ?", "are models paying large amounts of attention to the same context ?", "what if we explicitly train them to do so ?", "to answer these questions , we introduce scat ( supporting context for ambiguous translations ) , a new english - french dataset comprising supporting context words for 14k translations that professional translators found useful for pronoun disambiguation .", "using scat , we perform an in - depth analysis of the context used to disambiguate , examining positional and lexical characteristics of the supporting words .", "furthermore , we measure the degree of alignment between the model \u2019 s attention scores and the supporting context from scat , and apply a guided attention strategy to encourage agreement between the two ."], "events": [{"event_type": "ITT", "arguments": [{"text": "context - aware machine translation models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["context", "-", "aware", 
"machine", "translation", "models"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [9]}}, {"event_type": "RWF", "arguments": [{"text": "context - aware machine translation models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["context", "-", "aware", "machine", "translation", "models"], "offsets": [0, 1, 2, 3, 4, 5]}, {"text": "inaccurately disambiguate", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["inaccurately", "disambiguate"], "offsets": [25, 26]}, {"text": "pronouns words", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["pronouns", "words"], "offsets": [27, 30]}], "trigger": {"text": "inaccurately disambiguate", "tokens": ["inaccurately", "disambiguate"], "offsets": [25, 26]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [84]}, {"text": "scat", "nugget_type": "DST", "argument_type": "Content", "tokens": ["scat"], "offsets": [86]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [85]}}, {"event_type": "WKS", "arguments": [{"text": "supporting context words", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["supporting", "context", "words"], "offsets": [102, 103, 104]}, {"text": "14k translations", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["14k", "translations"], "offsets": [106, 107]}], "trigger": {"text": "comprising", "tokens": ["comprising"], "offsets": [101]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [120]}, {"text": "in - depth analysis of the context used to disambiguate", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["in", "-", "depth", "analysis", "of", "the", "context", "used", "to", "disambiguate"], "offsets": [123, 124, 125, 126, 127, 128, 129, 130, 131, 132]}], "trigger": {"text": "perform", "tokens": 
["perform"], "offsets": [121]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [120]}, {"text": "positional of the supporting words", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["positional", "of", "the", "supporting", "words"], "offsets": [135, 139, 140, 141, 142]}, {"text": "lexical characteristics of the supporting words", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["lexical", "characteristics", "of", "the", "supporting", "words"], "offsets": [137, 138, 139, 140, 141, 142]}], "trigger": {"text": "examining", "tokens": ["examining"], "offsets": [134]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [146]}, {"text": "degree of alignment", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["degree", "of", "alignment"], "offsets": [149, 150, 151]}, {"text": "between the model \u2019 s attention scores and the supporting context from scat", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "the", "model", "\u2019", "s", "attention", "scores", "and", "the", "supporting", "context", "from", "scat"], "offsets": [152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [147]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [146]}, {"text": "guided attention strategy", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["guided", "attention", "strategy"], "offsets": [169, 170, 171]}, {"text": "encourage", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encourage"], "offsets": [173]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [167]}}, {"event_type": "PUR", "arguments": [{"text": "agreement", "nugget_type": "TAK", "argument_type": 
"Aim", "tokens": ["agreement"], "offsets": [174]}, {"text": "between the two", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "attention", "scores", "and", "the", "supporting", "context"], "offsets": [175, 157, 158, 159, 160, 161, 162]}], "trigger": {"text": "encourage", "tokens": ["encourage"], "offsets": [173]}}], "document": ["context", "-", "aware", "machine", "translation", "models", "are", "designed", "to", "leverage", "contextual", "information", ",", "but", "often", "fail", "to", "do", "so", ".", "as", "a", "result", ",", "they", "inaccurately", "disambiguate", "pronouns", "and", "polysemous", "words", "that", "require", "context", "for", "resolution", ".", "in", "this", "paper", ",", "we", "ask", "several", "questions", ":", "what", "contexts", "do", "human", "translators", "use", "to", "resolve", "ambiguous", "words", "?", "are", "models", "paying", "large", "amounts", "of", "attention", "to", "the", "same", "context", "?", "what", "if", "we", "explicitly", "train", "them", "to", "do", "so", "?", "to", "answer", "these", "questions", ",", "we", "introduce", "scat", "(", "supporting", "context", "for", "ambiguous", "translations", ")", ",", "a", "new", "english", "-", "french", "dataset", "comprising", "supporting", "context", "words", "for", "14k", "translations", "that", "professional", "translators", "found", "useful", "for", "pronoun", "disambiguation", ".", "using", "scat", ",", "we", "perform", "an", "in", "-", "depth", "analysis", "of", "the", "context", "used", "to", "disambiguate", ",", "examining", "positional", "and", "lexical", "characteristics", "of", "the", "supporting", "words", ".", "furthermore", ",", "we", "measure", "the", "degree", "of", "alignment", "between", "the", "model", "\u2019", "s", "attention", "scores", "and", "the", "supporting", "context", "from", "scat", ",", "and", "apply", "a", "guided", "attention", "strategy", "to", "encourage", "agreement", "between", "the", "two", "."]}, {"venue": "ACL", 
"title": "Text-to-Table: A New Way of Information Extraction", "abstract": "We study a new problem setting of information extraction (IE), referred to as text-to-table. In text-to-table, given a text, one creates a table or several tables expressing the main content of the text, while the model is learned from text-table pair data. The problem setting differs from those of the existing methods for IE. First, the extraction can be carried out from long texts to large tables with complex structures. Second, the extraction is entirely data-driven, and there is no need to explicitly define the schemas. As far as we know, there has been no previous work that studies the problem. In this work, we formalize text-to-table as a sequence-to-sequence (seq2seq) problem. We first employ a seq2seq model fine-tuned from a pre-trained language model to perform the task. We also develop a new method within the seq2seq approach, exploiting two additional techniques in table generation: table constraint and table relation embeddings. We consider text-to-table as an inverse problem of the well-studied table-to-text, and make use of four existing table-to-text datasets in our experiments on text-to-table. Experimental results show that the vanilla seq2seq model can outperform the baseline methods of using relation extraction and named entity extraction. The results also show that our method can further boost the performances of the vanilla seq2seq model. We further discuss the main challenges of the proposed task. 
The code and data are available at https://github.com/shirley-wu/text_to_table.", "doc_id": "759cfeae21431ee534c98ad029a57441", "publication_year": 2022, "sentences": ["we study a new problem setting of information extraction ( ie ) , referred to as text - to - table .", "in text - to - table , given a text , one creates a table or several tables expressing the main content of the text , while the model is learned from text - table pair data .", "the problem setting differs from those of the existing methods for ie .", "first , the extraction can be carried out from long texts to large tables with complex structures .", "second , the extraction is entirely data - driven , and there is no need to explicitly define the schemas .", "as far as we know , there has been no previous work that studies the problem .", "in this work , we formalize text - to - table as a sequence - to - sequence ( seq2seq ) problem .", "we first employ a seq2seq model fine - tuned from a pre - trained language model to perform the task .", "we also develop a new method within the seq2seq approach , exploiting two additional techniques in table generation : table constraint and table relation embeddings .", "we consider text - to - table as an inverse problem of the well - studied table - to - text , and make use of four existing table - to - text datasets in our experiments on text - to - table .", "experimental results show that the vanilla seq2seq model can outperform the baseline methods of using relation extraction and named entity extraction .", "the results also show that our method can further boost the performances of the vanilla seq2seq model .", "we further discuss the main challenges of the proposed task .", "the code and data are available at https : / / github . 
com / shirley - wu / text _ to _ table ."], "events": [{"event_type": "ITT", "arguments": [{"text": "text - to - table", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "-", "to", "-", "table"], "offsets": [16, 17, 18, 19, 20]}], "trigger": {"text": "problem", "tokens": ["problem"], "offsets": [4]}}, {"event_type": "WKS", "arguments": [{"text": "text - to - table", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["text", "-", "to", "-", "table"], "offsets": [135, 136, 137, 138, 139]}, {"text": "sequence - to - sequence ( seq2seq ) problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sequence", "-", "to", "-", "sequence", "problem"], "offsets": [142, 143, 144, 145, 146, 150]}], "trigger": {"text": "formalize", "tokens": ["formalize"], "offsets": [134]}}, {"event_type": "MDS", "arguments": [{"text": "develop", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["develop"], "offsets": [175]}, {"text": "table relation embeddings", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["table", "relation", "embeddings"], "offsets": [195, 196, 197]}, {"text": "table constraint", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["table", "constraint"], "offsets": [192, 193]}], "trigger": {"text": "exploiting", "tokens": ["exploiting"], "offsets": [184]}}, {"event_type": "WKS", "arguments": [{"text": "four existing table - to - text datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["four", "existing", "table", "-", "to", "-", "text", "datasets"], "offsets": [225, 226, 227, 228, 229, 230, 231, 232]}, {"text": "in our experiments on text - to - table", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "our", "experiments", "on", "text", "-", "to", "-", "table"], "offsets": [233, 234, 235, 236, 237, 238, 239, 240, 241]}], "trigger": {"text": "make", "tokens": ["make"], "offsets": [222]}}, {"event_type": "WKS", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [199]}, {"text": "text - to - table", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["text", "-", "to", "-", "table"], "offsets": [201, 202, 203, 204, 205]}, {"text": "inverse problem of the well - studied table - to - text", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["inverse", "problem", "of", "the", "well", "-", "studied", "table", "-", "to", "-", "text"], "offsets": [208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [200]}}, {"event_type": "FIN", "arguments": [{"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], "offsets": [252]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [245]}}, {"event_type": "CMP", "arguments": [{"text": "vanilla seq2seq model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["vanilla", "seq2seq", "model"], "offsets": [248, 249, 250]}, {"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [252]}, {"text": "baseline methods of using relation extraction", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baseline", "methods", "of", "using", "relation", "extraction"], "offsets": [254, 255, 256, 257, 258, 259]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [252]}}, {"event_type": "FIN", "arguments": [{"text": "further boost", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["further", "boost"], "offsets": [273, 274]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [268]}}, {"event_type": "FAC", "arguments": [{"text": "performances of the vanilla seq2seq model", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performances", "of", "the", "vanilla", "seq2seq", "model"], "offsets": [276, 277, 278, 279, 280, 281]}, {"text": "method within the seq2seq 
approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["method", "within", "the", "seq2seq", "approach"], "offsets": [178, 179, 180, 181, 182]}], "trigger": {"text": "further boost", "tokens": ["further", "boost"], "offsets": [273, 274]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [283]}, {"text": "main challenges of the proposed task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["main", "challenges", "of", "the", "proposed", "task"], "offsets": [287, 288, 289, 290, 291, 292]}], "trigger": {"text": "further discuss", "tokens": ["further", "discuss"], "offsets": [284, 285]}}, {"event_type": "MDS", "arguments": [{"text": "seq2seq model", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["seq2seq", "model"], "offsets": [156, 157]}, {"text": "pre - trained language model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pre", "-", "trained", "language", "model"], "offsets": [163, 164, 165, 166, 167]}], "trigger": {"text": "fine - tuned", "tokens": ["fine", "-", "tuned"], "offsets": [158, 159, 160]}}, {"event_type": "PUR", "arguments": [{"text": "method within the seq2seq approach", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["method", "within", "the", "seq2seq", "approach"], "offsets": [178, 179, 180, 181, 182]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [175]}}], "document": ["we", "study", "a", "new", "problem", "setting", "of", "information", "extraction", "(", "ie", ")", ",", "referred", "to", "as", "text", "-", "to", "-", "table", ".", "in", "text", "-", "to", "-", "table", ",", "given", "a", "text", ",", "one", "creates", "a", "table", "or", "several", "tables", "expressing", "the", "main", "content", "of", "the", "text", ",", "while", "the", "model", "is", "learned", "from", "text", "-", "table", "pair", "data", ".", "the", "problem", "setting", "differs", "from", 
"those", "of", "the", "existing", "methods", "for", "ie", ".", "first", ",", "the", "extraction", "can", "be", "carried", "out", "from", "long", "texts", "to", "large", "tables", "with", "complex", "structures", ".", "second", ",", "the", "extraction", "is", "entirely", "data", "-", "driven", ",", "and", "there", "is", "no", "need", "to", "explicitly", "define", "the", "schemas", ".", "as", "far", "as", "we", "know", ",", "there", "has", "been", "no", "previous", "work", "that", "studies", "the", "problem", ".", "in", "this", "work", ",", "we", "formalize", "text", "-", "to", "-", "table", "as", "a", "sequence", "-", "to", "-", "sequence", "(", "seq2seq", ")", "problem", ".", "we", "first", "employ", "a", "seq2seq", "model", "fine", "-", "tuned", "from", "a", "pre", "-", "trained", "language", "model", "to", "perform", "the", "task", ".", "we", "also", "develop", "a", "new", "method", "within", "the", "seq2seq", "approach", ",", "exploiting", "two", "additional", "techniques", "in", "table", "generation", ":", "table", "constraint", "and", "table", "relation", "embeddings", ".", "we", "consider", "text", "-", "to", "-", "table", "as", "an", "inverse", "problem", "of", "the", "well", "-", "studied", "table", "-", "to", "-", "text", ",", "and", "make", "use", "of", "four", "existing", "table", "-", "to", "-", "text", "datasets", "in", "our", "experiments", "on", "text", "-", "to", "-", "table", ".", "experimental", "results", "show", "that", "the", "vanilla", "seq2seq", "model", "can", "outperform", "the", "baseline", "methods", "of", "using", "relation", "extraction", "and", "named", "entity", "extraction", ".", "the", "results", "also", "show", "that", "our", "method", "can", "further", "boost", "the", "performances", "of", "the", "vanilla", "seq2seq", "model", ".", "we", "further", "discuss", "the", "main", "challenges", "of", "the", "proposed", "task", ".", "the", "code", "and", "data", "are", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", 
"shirley", "-", "wu", "/", "text", "_", "to", "_", "table", "."]}, {"venue": "ACL", "title": "What Context Features Can Transformer Language Models Use?", "abstract": "Transformer-based language models benefit from conditioning on contexts of hundreds to thousands of previous tokens. What aspects of these contexts contribute to accurate model prediction? We describe a series of experiments that measure usable information by selectively ablating lexical and structural information in transformer language models trained on English Wikipedia. In both mid- and long-range contexts, we find that several extremely destructive context manipulations\u2014including shuffling word order within sentences and deleting all words other than nouns\u2014remove less than 15% of the usable information. Our results suggest that long contexts, but not their detailed syntactic and propositional content, are important for the low perplexity of current transformer language models.", "doc_id": "ae42a48e725cd1536374318203a8ad59", "publication_year": 2021, "sentences": ["transformer - based language models benefit from conditioning on contexts of hundreds to thousands of previous tokens .", "what aspects of these contexts contribute to accurate model prediction ?", "we describe a series of experiments that measure usable information by selectively ablating lexical and structural information in transformer language models trained on english wikipedia .", "in both mid - and long - range contexts , we find that several extremely destructive context manipulations \u2014 including shuffling word order within sentences and deleting all words other than nouns \u2014 remove less than 15 % of the usable information .", "our results suggest that long contexts , but not their detailed syntactic and propositional content , are important for the low perplexity of current transformer language models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "transformer - based language models", 
"nugget_type": "APP", "argument_type": "Target", "tokens": ["transformer", "-", "based", "language", "models"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "benefit", "tokens": ["benefit"], "offsets": [5]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [29]}, {"text": "lexical and structural information", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["lexical", "and", "structural", "information"], "offsets": [42, 43, 44, 45]}, {"text": "measure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["measure"], "offsets": [36]}], "trigger": {"text": "ablating", "tokens": ["ablating"], "offsets": [41]}}, {"event_type": "PUR", "arguments": [{"text": "usable information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["usable", "information"], "offsets": [37, 38]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [36]}}, {"event_type": "FAC", "arguments": [{"text": "long contexts", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["long", "contexts"], "offsets": [102, 103]}, {"text": "low perplexity of current transformer language models", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["low", "perplexity", "of", "current", "transformer", "language", "models"], "offsets": [118, 119, 120, 121, 122, 123, 124]}], "trigger": {"text": "important", "tokens": ["important"], "offsets": [115]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [65]}, {"text": "remove", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["remove"], "offsets": [88]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [66]}}, {"event_type": "FAC", "arguments": [{"text": "several extremely destructive context manipulations", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["several", "extremely", "destructive", "context", 
"manipulations"], "offsets": [68, 69, 70, 71, 72]}, {"text": "usable information", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["usable", "information"], "offsets": [95, 96]}, {"text": "less than 15 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["less", "than", "15", "%"], "offsets": [89, 90, 91, 92]}], "trigger": {"text": "remove", "tokens": ["remove"], "offsets": [88]}}], "document": ["transformer", "-", "based", "language", "models", "benefit", "from", "conditioning", "on", "contexts", "of", "hundreds", "to", "thousands", "of", "previous", "tokens", ".", "what", "aspects", "of", "these", "contexts", "contribute", "to", "accurate", "model", "prediction", "?", "we", "describe", "a", "series", "of", "experiments", "that", "measure", "usable", "information", "by", "selectively", "ablating", "lexical", "and", "structural", "information", "in", "transformer", "language", "models", "trained", "on", "english", "wikipedia", ".", "in", "both", "mid", "-", "and", "long", "-", "range", "contexts", ",", "we", "find", "that", "several", "extremely", "destructive", "context", "manipulations", "\u2014", "including", "shuffling", "word", "order", "within", "sentences", "and", "deleting", "all", "words", "other", "than", "nouns", "\u2014", "remove", "less", "than", "15", "%", "of", "the", "usable", "information", ".", "our", "results", "suggest", "that", "long", "contexts", ",", "but", "not", "their", "detailed", "syntactic", "and", "propositional", "content", ",", "are", "important", "for", "the", "low", "perplexity", "of", "current", "transformer", "language", "models", "."]}, {"venue": "ACL", "title": "The statistical advantage of automatic NLG metrics at the system level", "abstract": "Estimating the expected output quality of generation systems is central to NLG. This paper qualifies the notion that automatic metrics are not as good as humans in estimating system-level quality. 
Statistically, humans are unbiased, high variance estimators, while metrics are biased, low variance estimators. We compare these estimators by their error in pairwise prediction (which generation system is better?) using the bootstrap. Measuring this error is complicated: predictions are evaluated against noisy, human predicted labels instead of the ground truth, and metric predictions fluctuate based on the test sets they were calculated on. By applying a bias-variance-noise decomposition, we adjust this error to a noise-free, infinite test set setting. Our analysis compares the adjusted error of metrics to humans and a derived, perfect segment-level annotator, both of which are unbiased estimators dependent on the number of judgments collected. In MT, we identify two settings where metrics outperform humans due to a statistical advantage in variance: when the number of human judgments used is small, and when the quality difference between compared systems is small.", "doc_id": "eaae6e8433638142f565020be335de38", "publication_year": 2021, "sentences": ["estimating the expected output quality of generation systems is central to nlg .", "this paper qualifies the notion that automatic metrics are not as good as humans in estimating system - level quality .", "statistically , humans are unbiased , high variance estimators , while metrics are biased , low variance estimators .", "we compare these estimators by their error in pairwise prediction ( which generation system is better ? 
) using the bootstrap .", "measuring this error is complicated : predictions are evaluated against noisy , human predicted labels instead of the ground truth , and metric predictions fluctuate based on the test sets they were calculated on .", "by applying a bias - variance - noise decomposition , we adjust this error to a noise - free , infinite test set setting .", "our analysis compares the adjusted error of metrics to humans and a derived , perfect segment - level annotator , both of which are unbiased estimators dependent on the number of judgments collected .", "in mt , we identify two settings where metrics outperform humans due to a statistical advantage in variance : when the number of human judgments used is small , and when the quality difference between compared systems is small ."], "events": [{"event_type": "FIN", "arguments": [{"text": "not as good as", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["not", "as", "good", "as"], "offsets": [22, 23, 24, 25]}], "trigger": {"text": "qualifies", "tokens": ["qualifies"], "offsets": [15]}}, {"event_type": "CMP", "arguments": [{"text": "automatic metrics", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["automatic", "metrics"], "offsets": [19, 20]}, {"text": "humans", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["humans"], "offsets": [26]}, {"text": "not as good as", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["not", "as", "good", "as"], "offsets": [22, 23, 24, 25]}], "trigger": {"text": "not as good as", "tokens": ["not", "as", "good", "as"], "offsets": [22, 23, 24, 25]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [53]}, {"text": "by their error in pairwise prediction", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "their", "error", "in", "pairwise", "prediction"], "offsets": [57, 58, 59, 60, 61, 62]}, {"text": "humans", "nugget_type": "APP", 
"argument_type": "Content", "tokens": ["humans"], "offsets": [36]}, {"text": "metrics", "nugget_type": "APP", "argument_type": "Content", "tokens": ["metrics"], "offsets": [45]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [54]}}, {"event_type": "RWF", "arguments": [{"text": "measuring", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["measuring"], "offsets": [75]}], "trigger": {"text": "complicated", "tokens": ["complicated"], "offsets": [79]}}, {"event_type": "PUR", "arguments": [{"text": "this error", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["this", "error"], "offsets": [76, 77]}], "trigger": {"text": "measuring", "tokens": ["measuring"], "offsets": [75]}}, {"event_type": "MDS", "arguments": [{"text": "bias - variance - noise decomposition", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["bias", "-", "variance", "-", "noise", "decomposition"], "offsets": [113, 114, 115, 116, 117, 118]}, {"text": "adjust", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["adjust"], "offsets": [121]}], "trigger": {"text": "applying", "tokens": ["applying"], "offsets": [111]}}, {"event_type": "PUR", "arguments": [{"text": "this error", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["this", "error"], "offsets": [122, 123]}, {"text": "to a noise - free , infinite test set setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "a", "noise", "-", "free", ",", "infinite", "test", "set", "setting"], "offsets": [124, 125, 126, 127, 128, 129, 130, 131, 132, 133]}], "trigger": {"text": "adjust", "tokens": ["adjust"], "offsets": [121]}}, {"event_type": "WKS", "arguments": [{"text": "adjusted error of metrics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["adjusted", "error", "of", "metrics"], "offsets": [139, 140, 141, 142]}, {"text": "unbiased estimators", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["unbiased", "estimators"], "offsets": 
[159, 160]}], "trigger": {"text": "compares", "tokens": ["compares"], "offsets": [137]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [172]}, {"text": "quality difference", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["quality", "difference"], "offsets": [201, 202]}, {"text": "number of human judgments", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["number", "of", "human", "judgments"], "offsets": [190, 191, 192, 193]}, {"text": "where metrics outperform humans due to a statistical advantage in variance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["where", "metrics", "outperform", "humans", "due", "to", "a", "statistical", "advantage", "in", "variance"], "offsets": [176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [173]}}, {"event_type": "PUR", "arguments": [{"text": "humans", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["humans"], "offsets": [179]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [178]}}], "document": ["estimating", "the", "expected", "output", "quality", "of", "generation", "systems", "is", "central", "to", "nlg", ".", "this", "paper", "qualifies", "the", "notion", "that", "automatic", "metrics", "are", "not", "as", "good", "as", "humans", "in", "estimating", "system", "-", "level", "quality", ".", "statistically", ",", "humans", "are", "unbiased", ",", "high", "variance", "estimators", ",", "while", "metrics", "are", "biased", ",", "low", "variance", "estimators", ".", "we", "compare", "these", "estimators", "by", "their", "error", "in", "pairwise", "prediction", "(", "which", "generation", "system", "is", "better", "?", ")", "using", "the", "bootstrap", ".", "measuring", "this", "error", "is", "complicated", ":", "predictions", "are", "evaluated", "against", "noisy", ",", "human", "predicted", 
"labels", "instead", "of", "the", "ground", "truth", ",", "and", "metric", "predictions", "fluctuate", "based", "on", "the", "test", "sets", "they", "were", "calculated", "on", ".", "by", "applying", "a", "bias", "-", "variance", "-", "noise", "decomposition", ",", "we", "adjust", "this", "error", "to", "a", "noise", "-", "free", ",", "infinite", "test", "set", "setting", ".", "our", "analysis", "compares", "the", "adjusted", "error", "of", "metrics", "to", "humans", "and", "a", "derived", ",", "perfect", "segment", "-", "level", "annotator", ",", "both", "of", "which", "are", "unbiased", "estimators", "dependent", "on", "the", "number", "of", "judgments", "collected", ".", "in", "mt", ",", "we", "identify", "two", "settings", "where", "metrics", "outperform", "humans", "due", "to", "a", "statistical", "advantage", "in", "variance", ":", "when", "the", "number", "of", "human", "judgments", "used", "is", "small", ",", "and", "when", "the", "quality", "difference", "between", "compared", "systems", "is", "small", "."]}, {"venue": "ACL", "title": "What Ingredients Make for an Effective Crowdsourcing Protocol for Difficult NLU Data Collection Tasks?", "abstract": "Crowdsourcing is widely used to create data for common natural language understanding tasks. Despite the importance of these datasets for measuring and refining model understanding of language, there has been little focus on the crowdsourcing methods used for collecting the datasets. In this paper, we compare the efficacy of interventions that have been proposed in prior work as ways of improving data quality. We use multiple-choice question answering as a testbed and run a randomized trial by assigning crowdworkers to write questions under one of four different data collection protocols. We find that asking workers to write explanations for their examples is an ineffective stand-alone strategy for boosting NLU example difficulty. 
However, we find that training crowdworkers, and then using an iterative process of collecting data, sending feedback, and qualifying workers based on expert judgments is an effective means of collecting challenging data. But using crowdsourced, instead of expert judgments, to qualify workers and send feedback does not prove to be effective. We observe that the data from the iterative protocol with expert assessments is more challenging by several measures. Notably, the human\u2013model gap on the unanimous agreement portion of this data is, on average, twice as large as the gap for the baseline protocol data.", "doc_id": "8f34f80235ac8e047bc601a9444aa519", "publication_year": 2021, "sentences": ["crowdsourcing is widely used to create data for common natural language understanding tasks .", "despite the importance of these datasets for measuring and refining model understanding of language , there has been little focus on the crowdsourcing methods used for collecting the datasets .", "in this paper , we compare the efficacy of interventions that have been proposed in prior work as ways of improving data quality .", "we use multiple - choice question answering as a testbed and run a randomized trial by assigning crowdworkers to write questions under one of four different data collection protocols .", "we find that asking workers to write explanations for their examples is an ineffective stand - alone strategy for boosting nlu example difficulty .", "however , we find that training crowdworkers , and then using an iterative process of collecting data , sending feedback , and qualifying workers based on expert judgments is an effective means of collecting challenging data .", "but using crowdsourced , instead of expert judgments , to qualify workers and send feedback does not prove to be effective .", "we observe that the data from the iterative protocol with expert assessments is more challenging by several measures .", "notably , the human \u2013 model gap on the 
unanimous agreement portion of this data is , on average , twice as large as the gap for the baseline protocol data ."], "events": [{"event_type": "RWF", "arguments": [{"text": "little focus", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["little", "focus"], "offsets": [32, 33]}, {"text": "crowdsourcing methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["crowdsourcing", "methods"], "offsets": [36, 37]}], "trigger": {"text": "little focus", "tokens": ["little", "focus"], "offsets": [32, 33]}}, {"event_type": "PUR", "arguments": [{"text": "datasets", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["datasets"], "offsets": [42]}], "trigger": {"text": "collecting", "tokens": ["collecting"], "offsets": [40]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [48]}, {"text": "efficacy of interventions", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["efficacy", "of", "interventions"], "offsets": [51, 52, 53]}, {"text": "improving", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improving"], "offsets": [64]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [49]}}, {"event_type": "PUR", "arguments": [{"text": "data quality", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["data", "quality"], "offsets": [65, 66]}], "trigger": {"text": "improving", "tokens": ["improving"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [68]}, {"text": "multiple - choice question answering", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multiple", "-", "choice", "question", "answering"], "offsets": [70, 71, 72, 73, 74]}, {"text": "testbed", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["testbed"], "offsets": [77]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [69]}}, 
{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [68]}, {"text": "randomized trial", "nugget_type": "APP", "argument_type": "Content", "tokens": ["randomized", "trial"], "offsets": [81, 82]}], "trigger": {"text": "run", "tokens": ["run"], "offsets": [79]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [98]}, {"text": "ineffective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["ineffective"], "offsets": [111]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [99]}}, {"event_type": "FAC", "arguments": [{"text": "asking workers to write explanations for their examples", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["asking", "workers", "to", "write", "explanations", "for", "their", "examples"], "offsets": [101, 102, 103, 104, 105, 106, 107, 108]}, {"text": "stand - alone strategy", "nugget_type": "APP", "argument_type": "Object", "tokens": ["stand", "-", "alone", "strategy"], "offsets": [112, 113, 114, 115]}, {"text": "boosting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["boosting"], "offsets": [117]}], "trigger": {"text": "ineffective", "tokens": ["ineffective"], "offsets": [111]}}, {"event_type": "PUR", "arguments": [{"text": "nlu example difficulty", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["nlu", "example", "difficulty"], "offsets": [118, 119, 120]}], "trigger": {"text": "boosting", "tokens": ["boosting"], "offsets": [117]}}, {"event_type": "FIN", "arguments": [{"text": "effective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effective"], "offsets": [152]}, {"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [124]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [125]}}, {"event_type": "FAC", "arguments": [{"text": "training crowdworkers", 
"nugget_type": "TAK", "argument_type": "Subject", "tokens": ["training", "crowdworkers"], "offsets": [127, 128]}, {"text": "iterative process of collecting data", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["iterative", "process", "of", "collecting", "data"], "offsets": [134, 135, 136, 137, 138]}, {"text": "sending feedback", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["sending", "feedback"], "offsets": [140, 141]}, {"text": "qualifying workers based on expert judgments", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["qualifying", "workers", "based", "on", "expert", "judgments"], "offsets": [144, 145, 146, 147, 148, 149]}, {"text": "means", "nugget_type": "APP", "argument_type": "Object", "tokens": ["means"], "offsets": [153]}, {"text": "collecting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["collecting"], "offsets": [155]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [152]}}, {"event_type": "PUR", "arguments": [{"text": "challenging data", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["challenging", "data"], "offsets": [156, 157]}], "trigger": {"text": "collecting", "tokens": ["collecting"], "offsets": [155]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [181]}, {"text": "challenging", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["challenging"], "offsets": [195]}], "trigger": {"text": "observe", "tokens": ["observe"], "offsets": [182]}}, {"event_type": "FAC", "arguments": [{"text": "data from the iterative protocol", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["data", "from", "the", "iterative", "protocol"], "offsets": [185, 186, 187, 188, 189]}, {"text": "with expert assessments", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "expert", "assessments"], "offsets": [190, 191, 192]}, {"text": "more", "nugget_type": "DEG", 
"argument_type": "Extent", "tokens": ["more"], "offsets": [194]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [195]}}, {"event_type": "CMP", "arguments": [{"text": "human \u2013 model gap", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["human", "\u2013", "model", "gap"], "offsets": [203, 204, 205, 206]}, {"text": "unanimous agreement portion of this data", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["unanimous", "agreement", "portion", "of", "this", "data"], "offsets": [209, 210, 211, 212, 213, 214]}, {"text": "twice", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["twice"], "offsets": [220]}, {"text": "gap for the baseline protocol data", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["gap", "for", "the", "baseline", "protocol", "data"], "offsets": [225, 226, 227, 228, 229, 230]}], "trigger": {"text": "as large as", "tokens": ["as", "large", "as"], "offsets": [221, 222, 223]}}], "document": ["crowdsourcing", "is", "widely", "used", "to", "create", "data", "for", "common", "natural", "language", "understanding", "tasks", ".", "despite", "the", "importance", "of", "these", "datasets", "for", "measuring", "and", "refining", "model", "understanding", "of", "language", ",", "there", "has", "been", "little", "focus", "on", "the", "crowdsourcing", "methods", "used", "for", "collecting", "the", "datasets", ".", "in", "this", "paper", ",", "we", "compare", "the", "efficacy", "of", "interventions", "that", "have", "been", "proposed", "in", "prior", "work", "as", "ways", "of", "improving", "data", "quality", ".", "we", "use", "multiple", "-", "choice", "question", "answering", "as", "a", "testbed", "and", "run", "a", "randomized", "trial", "by", "assigning", "crowdworkers", "to", "write", "questions", "under", "one", "of", "four", "different", "data", "collection", "protocols", ".", "we", "find", "that", "asking", "workers", "to", "write", "explanations", "for", "their", "examples", "is", 
"an", "ineffective", "stand", "-", "alone", "strategy", "for", "boosting", "nlu", "example", "difficulty", ".", "however", ",", "we", "find", "that", "training", "crowdworkers", ",", "and", "then", "using", "an", "iterative", "process", "of", "collecting", "data", ",", "sending", "feedback", ",", "and", "qualifying", "workers", "based", "on", "expert", "judgments", "is", "an", "effective", "means", "of", "collecting", "challenging", "data", ".", "but", "using", "crowdsourced", ",", "instead", "of", "expert", "judgments", ",", "to", "qualify", "workers", "and", "send", "feedback", "does", "not", "prove", "to", "be", "effective", ".", "we", "observe", "that", "the", "data", "from", "the", "iterative", "protocol", "with", "expert", "assessments", "is", "more", "challenging", "by", "several", "measures", ".", "notably", ",", "the", "human", "\u2013", "model", "gap", "on", "the", "unanimous", "agreement", "portion", "of", "this", "data", "is", ",", "on", "average", ",", "twice", "as", "large", "as", "the", "gap", "for", "the", "baseline", "protocol", "data", "."]}, {"venue": "ACL", "title": "Three Sentences Are All You Need: Local Path Enhanced Document Relation Extraction", "abstract": "Document-level Relation Extraction (RE) is a more challenging task than sentence RE as it often requires reasoning over multiple sentences. Yet, human annotators usually use a small number of sentences to identify the relationship between a given entity pair. In this paper, we present an embarrassingly simple but effective method to heuristically select evidence sentences for document-level RE, which can be easily combined with BiLSTM to achieve good performance on benchmark datasets, even better than fancy graph neural network based methods. 
We have released our code at https://github.com/AndrewZhe/Three-Sentences-Are-All-You-Need.", "doc_id": "a7cfb2f40aa533c78e59552093c5bd44", "publication_year": 2021, "sentences": ["document - level relation extraction ( re ) is a more challenging task than sentence re as it often requires reasoning over multiple sentences .", "yet , human annotators usually use a small number of sentences to identify the relationship between a given entity pair .", "in this paper , we present an embarrassingly simple but effective method to heuristically select evidence sentences for document - level re , which can be easily combined with bilstm to achieve good performance on benchmark datasets , even better than fancy graph neural network based methods .", "we have released our code at https : / / github . com / andrewzhe / three - sentences - are - all - you - need ."], "events": [{"event_type": "ITT", "arguments": [{"text": "document - level relation extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["document", "-", "level", "relation", "extraction"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "more challenging task", "tokens": ["more", "challenging", "task"], "offsets": [10, 11, 12]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [50]}, {"text": "heuristically select", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["heuristically", "select"], "offsets": [59, 60]}, {"text": "document - level re", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["document", "-", "level", "relation", "extraction"], "offsets": [64, 65, 66, 3, 4]}, {"text": "effective method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["effective", "method"], "offsets": [56, 57]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [51]}}, {"event_type": "PUR", "arguments": [{"text": "evidence sentences", "nugget_type": "FEA", "argument_type": 
"Aim", "tokens": ["evidence", "sentences"], "offsets": [61, 62]}], "trigger": {"text": "heuristically select", "tokens": ["heuristically", "select"], "offsets": [59, 60]}}, {"event_type": "CMP", "arguments": [{"text": "effective method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["effective", "method"], "offsets": [56, 57]}, {"text": "good performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["good", "performance"], "offsets": [78, 79]}, {"text": "benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["benchmark", "datasets"], "offsets": [81, 82]}, {"text": "fancy graph neural network based methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["fancy", "graph", "neural", "network", "based", "methods"], "offsets": [87, 88, 89, 90, 91, 92]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [77]}}, {"event_type": "WKS", "arguments": [{"text": "bilstm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bilstm"], "offsets": [75]}], "trigger": {"text": "easily combined", "tokens": ["easily", "combined"], "offsets": [72, 73]}}], "document": ["document", "-", "level", "relation", "extraction", "(", "re", ")", "is", "a", "more", "challenging", "task", "than", "sentence", "re", "as", "it", "often", "requires", "reasoning", "over", "multiple", "sentences", ".", "yet", ",", "human", "annotators", "usually", "use", "a", "small", "number", "of", "sentences", "to", "identify", "the", "relationship", "between", "a", "given", "entity", "pair", ".", "in", "this", "paper", ",", "we", "present", "an", "embarrassingly", "simple", "but", "effective", "method", "to", "heuristically", "select", "evidence", "sentences", "for", "document", "-", "level", "re", ",", "which", "can", "be", "easily", "combined", "with", "bilstm", "to", "achieve", "good", "performance", "on", "benchmark", "datasets", ",", "even", "better", "than", "fancy", "graph", "neural", "network", "based", "methods", ".", 
"we", "have", "released", "our", "code", "at", "https", ":", "/", "/", "github", ".", "com", "/", "andrewzhe", "/", "three", "-", "sentences", "-", "are", "-", "all", "-", "you", "-", "need", "."]}, {"venue": "ACL", "title": "Attention Guided Graph Convolutional Networks for Relation Extraction", "abstract": "Dependency trees convey rich structural information that is proven useful for extracting relations among entities in text. However, how to effectively make use of relevant information while ignoring irrelevant information from the dependency trees remains a challenging research question. Existing approaches employing rule based hard-pruning strategies for selecting relevant partial dependency structures may not always yield optimal results. In this work, we propose Attention Guided Graph Convolutional Networks (AGGCNs), a novel model which directly takes full dependency trees as inputs. Our model can be understood as a soft-pruning approach that automatically learns how to selectively attend to the relevant sub-structures useful for the relation extraction task. 
Extensive results on various tasks including cross-sentence n-ary relation extraction and large-scale sentence-level relation extraction show that our model is able to better leverage the structural information of the full dependency trees, giving significantly better results than previous approaches.", "doc_id": "9d4a8542869ed5a46b3e539199061671", "publication_year": 2019, "sentences": ["dependency trees convey rich structural information that is proven useful for extracting relations among entities in text .", "however , how to effectively make use of relevant information while ignoring irrelevant information from the dependency trees remains a challenging research question .", "existing approaches employing rule based hard - pruning strategies for selecting relevant partial dependency structures may not always yield optimal results .", "in this work , we propose attention guided graph convolutional networks ( aggcns ) , a novel model which directly takes full dependency trees as inputs .", "our model can be understood as a soft - pruning approach that automatically learns how to selectively attend to the relevant sub - structures useful for the relation extraction task .", "extensive results on various tasks including cross - sentence n - ary relation extraction and large - scale sentence - level relation extraction show that our model is able to better leverage the structural information of the full dependency trees , giving significantly better results than previous approaches ."], "events": [{"event_type": "ITT", "arguments": [{"text": "dependency trees", "nugget_type": "APP", "argument_type": "Target", "tokens": ["dependency", "trees"], "offsets": [0, 1]}, {"text": "extracting relations among entities in text", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["extracting", "relations", "among", "entities", "in", "text"], "offsets": [11, 12, 13, 14, 15, 16]}], "trigger": {"text": "convey", "tokens": ["convey"], "offsets": [2]}}, {"event_type": 
"RWS", "arguments": [{"text": "existing approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "approaches"], "offsets": [42, 43]}, {"text": "selecting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["selecting"], "offsets": [52]}, {"text": "rule based hard - pruning strategies", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["rule", "based", "hard", "-", "pruning", "strategies"], "offsets": [45, 46, 47, 48, 49, 50]}], "trigger": {"text": "employing", "tokens": ["employing"], "offsets": [44]}}, {"event_type": "PUR", "arguments": [{"text": "relevant partial dependency structures", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["relevant", "partial", "dependency", "structures"], "offsets": [53, 54, 55, 56]}], "trigger": {"text": "selecting", "tokens": ["selecting"], "offsets": [52]}}, {"event_type": "RWF", "arguments": [{"text": "existing approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "approaches"], "offsets": [42, 43]}, {"text": "not always yield", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "always", "yield"], "offsets": [58, 59, 60]}], "trigger": {"text": "not always yield", "tokens": ["not", "always", "yield"], "offsets": [58, 59, 60]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [68]}, {"text": "attention guided graph convolutional networks", "nugget_type": "APP", "argument_type": "Content", "tokens": ["attention", "guided", "graph", "convolutional", "networks"], "offsets": [70, 71, 72, 73, 74]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [69]}}, {"event_type": "MDS", "arguments": [{"text": "relation extraction task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["relation", "extraction", "task"], "offsets": [118, 119, 120]}, {"text": "relevant sub - structures", "nugget_type": "MOD", "argument_type": 
"BaseComponent", "tokens": ["relevant", "sub", "-", "structures"], "offsets": [111, 112, 113, 114]}], "trigger": {"text": "automatically learns", "tokens": ["automatically", "learns"], "offsets": [103, 104]}}, {"event_type": "FIN", "arguments": [{"text": "leverage", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["leverage"], "offsets": [153]}, {"text": "giving", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["giving"], "offsets": [163]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [145]}}, {"event_type": "FAC", "arguments": [{"text": "structural information of the full dependency trees", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["structural", "information", "of", "the", "full", "dependency", "trees"], "offsets": [155, 156, 157, 158, 159, 160, 161]}, {"text": "attention guided graph convolutional networks", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["attention", "guided", "graph", "convolutional", "networks"], "offsets": [70, 71, 72, 73, 74]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [153]}}, {"event_type": "CMP", "arguments": [{"text": "attention guided graph convolutional networks", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["attention", "guided", "graph", "convolutional", "networks"], "offsets": [70, 71, 72, 73, 74]}, {"text": "previous approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "approaches"], "offsets": [168, 169]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [164]}, {"text": "better results", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "results"], "offsets": [165, 166]}], "trigger": {"text": "giving", "tokens": ["giving"], "offsets": [163]}}], "document": ["dependency", "trees", "convey", "rich", "structural", "information", "that", "is", "proven", "useful", "for", "extracting", "relations", "among", 
"entities", "in", "text", ".", "however", ",", "how", "to", "effectively", "make", "use", "of", "relevant", "information", "while", "ignoring", "irrelevant", "information", "from", "the", "dependency", "trees", "remains", "a", "challenging", "research", "question", ".", "existing", "approaches", "employing", "rule", "based", "hard", "-", "pruning", "strategies", "for", "selecting", "relevant", "partial", "dependency", "structures", "may", "not", "always", "yield", "optimal", "results", ".", "in", "this", "work", ",", "we", "propose", "attention", "guided", "graph", "convolutional", "networks", "(", "aggcns", ")", ",", "a", "novel", "model", "which", "directly", "takes", "full", "dependency", "trees", "as", "inputs", ".", "our", "model", "can", "be", "understood", "as", "a", "soft", "-", "pruning", "approach", "that", "automatically", "learns", "how", "to", "selectively", "attend", "to", "the", "relevant", "sub", "-", "structures", "useful", "for", "the", "relation", "extraction", "task", ".", "extensive", "results", "on", "various", "tasks", "including", "cross", "-", "sentence", "n", "-", "ary", "relation", "extraction", "and", "large", "-", "scale", "sentence", "-", "level", "relation", "extraction", "show", "that", "our", "model", "is", "able", "to", "better", "leverage", "the", "structural", "information", "of", "the", "full", "dependency", "trees", ",", "giving", "significantly", "better", "results", "than", "previous", "approaches", "."]}, {"venue": "ACL", "title": "A Dataset and Baselines for Multilingual Reply Suggestion", "abstract": "Reply suggestion models help users process emails and chats faster. Previous work only studies English reply suggestion. Instead, we present MRS, a multilingual reply suggestion dataset with ten languages. MRS can be used to compare two families of models: 1) retrieval models that select the reply from a fixed set and 2) generation models that produce the reply from scratch. 
Therefore, MRS complements existing cross-lingual generalization benchmarks that focus on classification and sequence labeling tasks. We build a generation model and a retrieval model as baselines for MRS. The two models have different strengths in the monolingual setting, and they require different strategies to generalize across languages. MRS is publicly available at https://github.com/zhangmozhi/mrs.", "doc_id": "e5ce08e170e82f62c1a36c268bed1c21", "publication_year": 2021, "sentences": ["reply suggestion models help users process emails and chats faster .", "previous work only studies english reply suggestion .", "instead , we present mrs , a multilingual reply suggestion dataset with ten languages .", "mrs can be used to compare two families of models : 1 ) retrieval models that select the reply from a fixed set and 2 ) generation models that produce the reply from scratch .", "therefore , mrs complements existing cross - lingual generalization benchmarks that focus on classification and sequence labeling tasks .", "we build a generation model and a retrieval model as baselines for mrs .", "the two models have different strengths in the monolingual setting , and they require different strategies to generalize across languages .", "mrs is publicly available at https : / / github . 
com / zhangmozhi / mrs ."], "events": [{"event_type": "ITT", "arguments": [{"text": "reply suggestion models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["reply", "suggestion", "models"], "offsets": [0, 1, 2]}], "trigger": {"text": "help", "tokens": ["help"], "offsets": [3]}}, {"event_type": "RWF", "arguments": [{"text": "previous work", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "work"], "offsets": [11, 12]}, {"text": "english reply suggestion", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["english", "reply", "suggestion"], "offsets": [15, 16, 17]}], "trigger": {"text": "only studies", "tokens": ["only", "studies"], "offsets": [13, 14]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [21]}, {"text": "multilingual reply suggestion dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["multilingual", "reply", "suggestion", "dataset"], "offsets": [26, 27, 28, 29]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [22]}}, {"event_type": "WKS", "arguments": [{"text": "mrs", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["mrs"], "offsets": [34]}, {"text": "compare", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["compare"], "offsets": [39]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [37]}}, {"event_type": "WKS", "arguments": [{"text": "existing cross - lingual generalization benchmarks", "nugget_type": "APP", "argument_type": "Content", "tokens": ["existing", "cross", "-", "lingual", "generalization", "benchmarks"], "offsets": [73, 74, 75, 76, 77, 78]}, {"text": "classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["classification"], "offsets": [82]}, {"text": "sequence labeling tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sequence", "labeling", "tasks"], "offsets": [84, 85, 86]}], "trigger": {"text": 
"focus", "tokens": ["focus"], "offsets": [80]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [88]}, {"text": "generation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["generation", "model"], "offsets": [91, 92]}, {"text": "retrieval model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["retrieval", "model"], "offsets": [95, 96]}, {"text": "baselines for mrs", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["baselines", "for", "mrs"], "offsets": [98, 99, 100]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [89]}}, {"event_type": "FAC", "arguments": [{"text": "in the monolingual setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "monolingual", "setting"], "offsets": [108, 109, 110, 111]}, {"text": "different strengths", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["different", "strengths"], "offsets": [106, 107]}, {"text": "two models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["a", "generation", "model", "retrieval", "model"], "offsets": [90, 91, 92, 95, 96]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [105]}}, {"event_type": "FAC", "arguments": [{"text": "generalize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generalize"], "offsets": [119]}, {"text": "different strategies", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["different", "strategies"], "offsets": [116, 117]}], "trigger": {"text": "require", "tokens": ["require"], "offsets": [115]}}, {"event_type": "PUR", "arguments": [{"text": "languages", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["languages"], "offsets": [121]}], "trigger": {"text": "generalize", "tokens": ["generalize"], "offsets": [119]}}, {"event_type": "PUR", "arguments": [{"text": "retrieval models", "nugget_type": "APP", "argument_type": "Aim", "tokens": 
["retrieval", "models"], "offsets": [47, 48]}, {"text": "generation models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["generation", "models"], "offsets": [60, 61]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [39]}}], "document": ["reply", "suggestion", "models", "help", "users", "process", "emails", "and", "chats", "faster", ".", "previous", "work", "only", "studies", "english", "reply", "suggestion", ".", "instead", ",", "we", "present", "mrs", ",", "a", "multilingual", "reply", "suggestion", "dataset", "with", "ten", "languages", ".", "mrs", "can", "be", "used", "to", "compare", "two", "families", "of", "models", ":", "1", ")", "retrieval", "models", "that", "select", "the", "reply", "from", "a", "fixed", "set", "and", "2", ")", "generation", "models", "that", "produce", "the", "reply", "from", "scratch", ".", "therefore", ",", "mrs", "complements", "existing", "cross", "-", "lingual", "generalization", "benchmarks", "that", "focus", "on", "classification", "and", "sequence", "labeling", "tasks", ".", "we", "build", "a", "generation", "model", "and", "a", "retrieval", "model", "as", "baselines", "for", "mrs", ".", "the", "two", "models", "have", "different", "strengths", "in", "the", "monolingual", "setting", ",", "and", "they", "require", "different", "strategies", "to", "generalize", "across", "languages", ".", "mrs", "is", "publicly", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "zhangmozhi", "/", "mrs", "."]}, {"venue": "ACL", "title": "UXLA: A Robust Unsupervised Data Augmentation Framework for Zero-Resource Cross-Lingual NLP", "abstract": "Transfer learning has yielded state-of-the-art (SoTA) results in many supervised NLP tasks. However, annotated data for every target task in every target language is rare, especially for low-resource languages. We propose UXLA, a novel unsupervised data augmentation framework for zero-resource transfer learning scenarios. 
In particular, UXLA aims to solve cross-lingual adaptation problems from a source language task distribution to an unknown target language task distribution, assuming no training label in the target language. At its core, UXLA performs simultaneous self-training with data augmentation and unsupervised sample selection. To show its effectiveness, we conduct extensive experiments on three diverse zero-resource cross-lingual transfer tasks. UXLA achieves SoTA results in all the tasks, outperforming the baselines by a good margin. With an in-depth framework dissection, we demonstrate the cumulative contributions of different components to its success.", "doc_id": "54b40d7610095d7dbe1d4c1fa1f7b060", "publication_year": 2021, "sentences": ["transfer learning has yielded state - of - the - art ( sota ) results in many supervised nlp tasks .", "however , annotated data for every target task in every target language is rare , especially for low - resource languages .", "we propose uxla , a novel unsupervised data augmentation framework for zero - resource transfer learning scenarios .", "in particular , uxla aims to solve cross - lingual adaptation problems from a source language task distribution to an unknown target language task distribution , assuming no training label in the target language .", "at its core , uxla performs simultaneous self - training with data augmentation and unsupervised sample selection .", "to show its effectiveness , we conduct extensive experiments on three diverse zero - resource cross - lingual transfer tasks .", "uxla achieves sota results in all the tasks , outperforming the baselines by a good margin .", "with an in - depth framework dissection , we demonstrate the cumulative contributions of different components to its success ."], "events": [{"event_type": "ITT", "arguments": [{"text": "transfer learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["transfer", "learning"], "offsets": [0, 1]}], "trigger": {"text": 
"yielded", "tokens": ["yielded"], "offsets": [3]}}, {"event_type": "RWF", "arguments": [{"text": "annotated data", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["annotated", "data"], "offsets": [23, 24]}, {"text": "target task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["target", "task"], "offsets": [27, 28]}, {"text": "in every target language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "every", "target", "language"], "offsets": [29, 30, 31, 32]}], "trigger": {"text": "rare", "tokens": ["rare"], "offsets": [34]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [43]}, {"text": "uxla", "nugget_type": "APP", "argument_type": "Content", "tokens": ["uxla"], "offsets": [45]}, {"text": "zero - resource transfer learning scenarios", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["zero", "-", "resource", "transfer", "learning", "scenarios"], "offsets": [54, 55, 56, 57, 58, 59]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [44]}}, {"event_type": "WKS", "arguments": [{"text": "assuming no training label in the target language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["assuming", "no", "training", "label", "in", "the", "target", "language"], "offsets": [87, 88, 89, 90, 91, 92, 93, 94]}, {"text": "cross - lingual adaptation problems", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["cross", "-", "lingual", "adaptation", "problems"], "offsets": [68, 69, 70, 71, 72]}], "trigger": {"text": "solve", "tokens": ["solve"], "offsets": [67]}}, {"event_type": "WKS", "arguments": [{"text": "data augmentation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["data", "augmentation"], "offsets": [107, 108]}, {"text": "unsupervised sample selection", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["unsupervised", "sample", "selection"], "offsets": [110, 
111, 112]}, {"text": "simultaneous self - training", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["simultaneous", "self", "-", "training"], "offsets": [102, 103, 104, 105]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [101]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [119]}, {"text": "on three diverse zero - resource cross - lingual transfer tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "three", "diverse", "zero", "-", "resource", "cross", "-", "lingual", "transfer", "tasks"], "offsets": [123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133]}, {"text": "show", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["show"], "offsets": [115]}, {"text": "extensive experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["extensive", "experiments"], "offsets": [121, 122]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [120]}}, {"event_type": "PUR", "arguments": [{"text": "effectiveness", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["effectiveness"], "offsets": [117]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [115]}}, {"event_type": "FAC", "arguments": [{"text": "uxla", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["uxla"], "offsets": [135]}, {"text": "in all the tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "all", "the", "tasks"], "offsets": [139, 140, 141, 142]}, {"text": "sota results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["sota", "results"], "offsets": [137, 138]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [136]}}, {"event_type": "CMP", "arguments": [{"text": "uxla", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["uxla"], "offsets": [135]}, {"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": 
["outperforming"], "offsets": [144]}, {"text": "baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baselines"], "offsets": [146]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [144]}}], "document": ["transfer", "learning", "has", "yielded", "state", "-", "of", "-", "the", "-", "art", "(", "sota", ")", "results", "in", "many", "supervised", "nlp", "tasks", ".", "however", ",", "annotated", "data", "for", "every", "target", "task", "in", "every", "target", "language", "is", "rare", ",", "especially", "for", "low", "-", "resource", "languages", ".", "we", "propose", "uxla", ",", "a", "novel", "unsupervised", "data", "augmentation", "framework", "for", "zero", "-", "resource", "transfer", "learning", "scenarios", ".", "in", "particular", ",", "uxla", "aims", "to", "solve", "cross", "-", "lingual", "adaptation", "problems", "from", "a", "source", "language", "task", "distribution", "to", "an", "unknown", "target", "language", "task", "distribution", ",", "assuming", "no", "training", "label", "in", "the", "target", "language", ".", "at", "its", "core", ",", "uxla", "performs", "simultaneous", "self", "-", "training", "with", "data", "augmentation", "and", "unsupervised", "sample", "selection", ".", "to", "show", "its", "effectiveness", ",", "we", "conduct", "extensive", "experiments", "on", "three", "diverse", "zero", "-", "resource", "cross", "-", "lingual", "transfer", "tasks", ".", "uxla", "achieves", "sota", "results", "in", "all", "the", "tasks", ",", "outperforming", "the", "baselines", "by", "a", "good", "margin", ".", "with", "an", "in", "-", "depth", "framework", "dissection", ",", "we", "demonstrate", "the", "cumulative", "contributions", "of", "different", "components", "to", "its", "success", "."]}, {"venue": "ACL", "title": "A Corpus for Large-Scale Phonetic Typology", "abstract": "A major hurdle in data-driven research on typology is having sufficient data in many languages to draw meaningful 
conclusions. We present VoxClamantis v1.0, the first large-scale corpus for phonetic typology, with aligned segments and estimated phoneme-level labels in 690 readings spanning 635 languages, along with acoustic-phonetic measures of vowels and sibilants. Access to such data can greatly facilitate investigation of phonetic typology at a large scale and across many languages. However, it is non-trivial and computationally intensive to obtain such alignments for hundreds of languages, many of which have few to no resources presently available. We describe the methodology to create our corpus, discuss caveats with current methods and their impact on the utility of this data, and illustrate possible research directions through a series of case studies on the 48 highest-quality readings. Our corpus and scripts are publicly available for non-commercial use at https://voxclamantisproject.github.io.", "doc_id": "55ad12dc61caa3bdd91f6f0484f7a1e1", "publication_year": 2020, "sentences": ["a major hurdle in data - driven research on typology is having sufficient data in many languages to draw meaningful conclusions .", "we present voxclamantis v1 . 
0 , the first large - scale corpus for phonetic typology , with aligned segments and estimated phoneme - level labels in 690 readings spanning 635 languages , along with acoustic - phonetic measures of vowels and sibilants .", "access to such data can greatly facilitate investigation of phonetic typology at a large scale and across many languages .", "however , it is non - trivial and computationally intensive to obtain such alignments for hundreds of languages , many of which have few to no resources presently available .", "we describe the methodology to create our corpus , discuss caveats with current methods and their impact on the utility of this data , and illustrate possible research directions through a series of case studies on the 48 highest - quality readings .", "our corpus and scripts are publicly available for non - commercial use at https : / / voxclamantisproject . github . io ."], "events": [{"event_type": "RWF", "arguments": [{"text": "data - driven research", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["data", "-", "driven", "research"], "offsets": [4, 5, 6, 7]}, {"text": "draw", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["draw"], "offsets": [18]}, {"text": "hurdle", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hurdle"], "offsets": [2]}], "trigger": {"text": "hurdle", "tokens": ["hurdle"], "offsets": [2]}}, {"event_type": "PUR", "arguments": [{"text": "meaningful conclusions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["meaningful", "conclusions"], "offsets": [19, 20]}], "trigger": {"text": "draw", "tokens": ["draw"], "offsets": [18]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [22]}, {"text": "voxclamantis v1 . 
0", "nugget_type": "DST", "argument_type": "Content", "tokens": ["voxclamantis", "v1", ".", "0"], "offsets": [24, 25, 26, 27]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [23]}}, {"event_type": "FAC", "arguments": [{"text": "access to such data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["access", "to", "such", "data"], "offsets": [66, 67, 68, 69]}, {"text": "investigation of phonetic typology", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["investigation", "of", "phonetic", "typology"], "offsets": [73, 74, 75, 76]}, {"text": "at a large scale and across many languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "a", "large", "scale", "and", "across", "many", "languages"], "offsets": [77, 78, 79, 80, 81, 82, 83, 84]}], "trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [72]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [116]}, {"text": "methodology", "nugget_type": "APP", "argument_type": "Content", "tokens": ["methodology"], "offsets": [119]}, {"text": "create", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["create"], "offsets": [121]}], "trigger": {"text": "describe", "tokens": ["describe"], "offsets": [117]}}, {"event_type": "PUR", "arguments": [{"text": "voxclamantis v1 . 
0", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["voxclamantis", "v1", ".", "0"], "offsets": [24, 25, 26, 27]}], "trigger": {"text": "create", "tokens": ["create"], "offsets": [121]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [116]}, {"text": "impact on the utility", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["impact", "on", "the", "utility"], "offsets": [132, 133, 134, 135]}, {"text": "caveats", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["caveats"], "offsets": [126]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [125]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [116]}, {"text": "possible research directions", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["possible", "research", "directions"], "offsets": [142, 143, 144]}], "trigger": {"text": "illustrate", "tokens": ["illustrate"], "offsets": [141]}}], "document": ["a", "major", "hurdle", "in", "data", "-", "driven", "research", "on", "typology", "is", "having", "sufficient", "data", "in", "many", "languages", "to", "draw", "meaningful", "conclusions", ".", "we", "present", "voxclamantis", "v1", ".", "0", ",", "the", "first", "large", "-", "scale", "corpus", "for", "phonetic", "typology", ",", "with", "aligned", "segments", "and", "estimated", "phoneme", "-", "level", "labels", "in", "690", "readings", "spanning", "635", "languages", ",", "along", "with", "acoustic", "-", "phonetic", "measures", "of", "vowels", "and", "sibilants", ".", "access", "to", "such", "data", "can", "greatly", "facilitate", "investigation", "of", "phonetic", "typology", "at", "a", "large", "scale", "and", "across", "many", "languages", ".", "however", ",", "it", "is", "non", "-", "trivial", "and", "computationally", "intensive", "to", "obtain", "such", "alignments", "for", 
"hundreds", "of", "languages", ",", "many", "of", "which", "have", "few", "to", "no", "resources", "presently", "available", ".", "we", "describe", "the", "methodology", "to", "create", "our", "corpus", ",", "discuss", "caveats", "with", "current", "methods", "and", "their", "impact", "on", "the", "utility", "of", "this", "data", ",", "and", "illustrate", "possible", "research", "directions", "through", "a", "series", "of", "case", "studies", "on", "the", "48", "highest", "-", "quality", "readings", ".", "our", "corpus", "and", "scripts", "are", "publicly", "available", "for", "non", "-", "commercial", "use", "at", "https", ":", "/", "/", "voxclamantisproject", ".", "github", ".", "io", "."]}, {"venue": "ACL", "title": "Beyond BLEU:Training Neural Machine Translation with Semantic Similarity", "abstract": "While most neural machine translation (NMT)systems are still trained using maximum likelihood estimation, recent work has demonstrated that optimizing systems to directly improve evaluation metrics such as BLEU can significantly improve final translation accuracy. However, training with BLEU has some limitations: it doesn\u2019t assign partial credit, it has a limited range of output values, and it can penalize semantically correct hypotheses if they differ lexically from the reference. In this paper, we introduce an alternative reward function for optimizing NMT systems that is based on recent work in semantic similarity. We evaluate on four disparate languages trans-lated to English, and find that training with our proposed metric results in better translations as evaluated by BLEU, semantic similarity, and human evaluation, and also that the optimization procedure converges faster. 
Analysis suggests that this is because the proposed metric is more conducive to optimization, assigning partial credit and providing more diversity in scores than BLEU", "doc_id": "67c005bbbe908b9b9e151f4431cc354f", "publication_year": 2019, "sentences": ["while most neural machine translation ( nmt ) systems are still trained using maximum likelihood estimation , recent work has demonstrated that optimizing systems to directly improve evaluation metrics such as bleu can significantly improve final translation accuracy .", "however , training with bleu has some limitations : it doesn \u2019 t assign partial credit , it has a limited range of output values , and it can penalize semantically correct hypotheses if they differ lexically from the reference .", "in this paper , we introduce an alternative reward function for optimizing nmt systems that is based on recent work in semantic similarity .", "we evaluate on four disparate languages trans - lated to english , and find that training with our proposed metric results in better translations as evaluated by bleu , semantic similarity , and human evaluation , and also that the optimization procedure converges faster .", "analysis suggests that this is because the proposed metric is more conducive to optimization , assigning partial credit and providing more diversity in scores than bleu"], "events": [{"event_type": "ITT", "arguments": [{"text": "neural machine translation ( nmt ) systems", "nugget_type": "APP", "argument_type": "Target", "tokens": ["neural", "machine", "translation", "(", "nmt", ")", "systems"], "offsets": [2, 3, 4, 5, 6, 7, 8]}], "trigger": {"text": "still trained", "tokens": ["still", "trained"], "offsets": [10, 11]}}, {"event_type": "RWS", "arguments": [{"text": "recent work", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["recent", "work"], "offsets": [17, 18]}, {"text": "evaluation metrics", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["evaluation", 
"metrics"], "offsets": [27, 28]}, {"text": "systems", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["systems"], "offsets": [23]}, {"text": "significantly improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["significantly", "improve"], "offsets": [33, 34]}], "trigger": {"text": "optimizing", "tokens": ["optimizing"], "offsets": [22]}}, {"event_type": "PUR", "arguments": [{"text": "final translation accuracy", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["final", "translation", "accuracy"], "offsets": [35, 36, 37]}], "trigger": {"text": "significantly improve", "tokens": ["significantly", "improve"], "offsets": [33, 34]}}, {"event_type": "RWF", "arguments": [{"text": "training with bleu", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["training", "with", "bleu"], "offsets": [41, 42, 43]}, {"text": "partial credit", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["partial", "credit"], "offsets": [53, 54]}], "trigger": {"text": "doesn \u2019 t assign", "tokens": ["doesn", "\u2019", "t", "assign"], "offsets": [49, 50, 51, 52]}}, {"event_type": "RWF", "arguments": [{"text": "training with bleu", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["training", "with", "bleu"], "offsets": [41, 42, 43]}, {"text": "limited range of output values", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["limited", "range", "of", "output", "values"], "offsets": [59, 60, 61, 62, 63]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [57]}}, {"event_type": "RWF", "arguments": [{"text": "training with bleu", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["training", "with", "bleu"], "offsets": [41, 42, 43]}, {"text": "if they differ lexically from the reference", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["if", "they", "differ", "lexically", "from", "the", "reference"], "offsets": [72, 73, 74, 75, 76, 77, 78]}, {"text": "semantically correct 
hypotheses", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["semantically", "correct", "hypotheses"], "offsets": [69, 70, 71]}], "trigger": {"text": "penalize", "tokens": ["penalize"], "offsets": [68]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [84]}, {"text": "alternative reward function", "nugget_type": "APP", "argument_type": "Content", "tokens": ["alternative", "reward", "function"], "offsets": [87, 88, 89]}, {"text": "optimizing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["optimizing"], "offsets": [91]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [85]}}, {"event_type": "PUR", "arguments": [{"text": "neural machine translation ( nmt ) systems", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["neural", "machine", "translation", "(", "nmt", ")", "systems"], "offsets": [2, 3, 4, 5, 6, 7, 8]}], "trigger": {"text": "optimizing", "tokens": ["optimizing"], "offsets": [91]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [104]}, {"text": "four disparate languages trans - lated to english", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["four", "disparate", "languages", "trans", "-", "lated", "to", "english"], "offsets": [107, 108, 109, 110, 111, 112, 113, 114]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [105]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [104]}, {"text": "results", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["results"], "offsets": [124]}, {"text": "converges", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["converges"], "offsets": [146]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [117]}}, {"event_type": "CMP", "arguments": [{"text": 
"training with our proposed metric", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["training", "with", "our", "proposed", "metric"], "offsets": [119, 120, 121, 122, 123]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [126]}, {"text": "translations", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["translations"], "offsets": [127]}, {"text": "evaluated by bleu , semantic similarity , and human evaluation", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["evaluated", "by", "bleu", ",", "semantic", "similarity", ",", "and", "human", "evaluation"], "offsets": [129, 130, 131, 132, 133, 134, 135, 136, 137, 138]}], "trigger": {"text": "results", "tokens": ["results"], "offsets": [124]}}, {"event_type": "CMP", "arguments": [{"text": "optimization procedure", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["optimization", "procedure"], "offsets": [144, 145]}, {"text": "faster", "nugget_type": "STR", "argument_type": "Result", "tokens": ["faster"], "offsets": [147]}], "trigger": {"text": "converges", "tokens": ["converges"], "offsets": [146]}}, {"event_type": "CMP", "arguments": [{"text": "proposed metric", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["proposed", "metric"], "offsets": [156, 157]}], "trigger": {"text": "more conducive", "tokens": ["more", "conducive"], "offsets": [159, 160]}}, {"event_type": "CMP", "arguments": [{"text": "proposed metric", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["proposed", "metric"], "offsets": [156, 157]}, {"text": "more diversity", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "diversity"], "offsets": [169, 170]}, {"text": "scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["scores"], "offsets": [172]}, {"text": "bleu", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["bleu"], "offsets": [174]}], "trigger": {"text": "providing", "tokens": ["providing"], 
"offsets": [168]}}], "document": ["while", "most", "neural", "machine", "translation", "(", "nmt", ")", "systems", "are", "still", "trained", "using", "maximum", "likelihood", "estimation", ",", "recent", "work", "has", "demonstrated", "that", "optimizing", "systems", "to", "directly", "improve", "evaluation", "metrics", "such", "as", "bleu", "can", "significantly", "improve", "final", "translation", "accuracy", ".", "however", ",", "training", "with", "bleu", "has", "some", "limitations", ":", "it", "doesn", "\u2019", "t", "assign", "partial", "credit", ",", "it", "has", "a", "limited", "range", "of", "output", "values", ",", "and", "it", "can", "penalize", "semantically", "correct", "hypotheses", "if", "they", "differ", "lexically", "from", "the", "reference", ".", "in", "this", "paper", ",", "we", "introduce", "an", "alternative", "reward", "function", "for", "optimizing", "nmt", "systems", "that", "is", "based", "on", "recent", "work", "in", "semantic", "similarity", ".", "we", "evaluate", "on", "four", "disparate", "languages", "trans", "-", "lated", "to", "english", ",", "and", "find", "that", "training", "with", "our", "proposed", "metric", "results", "in", "better", "translations", "as", "evaluated", "by", "bleu", ",", "semantic", "similarity", ",", "and", "human", "evaluation", ",", "and", "also", "that", "the", "optimization", "procedure", "converges", "faster", ".", "analysis", "suggests", "that", "this", "is", "because", "the", "proposed", "metric", "is", "more", "conducive", "to", "optimization", ",", "assigning", "partial", "credit", "and", "providing", "more", "diversity", "in", "scores", "than", "bleu"]}, {"venue": "ACL", "title": "The KnowRef Coreference Corpus: Removing Gender and Number Cues for Difficult Pronominal Anaphora Resolution", "abstract": "We introduce a new benchmark for coreference resolution and NLI, KnowRef, that targets common-sense understanding and world knowledge. 
Previous coreference resolution tasks can largely be solved by exploiting the number and gender of the antecedents, or have been handcrafted and do not reflect the diversity of naturally occurring text. We present a corpus of over 8,000 annotated text passages with ambiguous pronominal anaphora. These instances are both challenging and realistic. We show that various coreference systems, whether rule-based, feature-rich, or neural, perform significantly worse on the task than humans, who display high inter-annotator agreement. To explain this performance gap, we show empirically that state-of-the art models often fail to capture context, instead relying on the gender or number of candidate antecedents to make a decision. We then use problem-specific insights to propose a data-augmentation trick called antecedent switching to alleviate this tendency in models. Finally, we show that antecedent switching yields promising results on other tasks as well: we use it to achieve state-of-the-art results on the GAP coreference task.", "doc_id": "244d0ce79836e40498fc31cad2f769d2", "publication_year": 2019, "sentences": ["we introduce a new benchmark for coreference resolution and nli , knowref , that targets common - sense understanding and world knowledge .", "previous coreference resolution tasks can largely be solved by exploiting the number and gender of the antecedents , or have been handcrafted and do not reflect the diversity of naturally occurring text .", "we present a corpus of over 8 , 000 annotated text passages with ambiguous pronominal anaphora .", "these instances are both challenging and realistic .", "we show that various coreference systems , whether rule - based , feature - rich , or neural , perform significantly worse on the task than humans , who display high inter - annotator agreement .", "to explain this performance gap , we show empirically that state - of - the art models often fail to capture context , instead relying on the gender or number of 
candidate antecedents to make a decision .", "we then use problem - specific insights to propose a data - augmentation trick called antecedent switching to alleviate this tendency in models .", "finally , we show that antecedent switching yields promising results on other tasks as well : we use it to achieve state - of - the - art results on the gap coreference task ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "benchmark", "nugget_type": "APP", "argument_type": "Content", "tokens": ["benchmark"], "offsets": [4]}, {"text": "coreference resolution", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["coreference", "resolution"], "offsets": [6, 7]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [1]}}, {"event_type": "RWS", "arguments": [{"text": "number and gender of the antecedents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["number", "and", "gender", "of", "the", "antecedents"], "offsets": [34, 35, 36, 37, 38, 39]}, {"text": "previous coreference resolution tasks", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "coreference", "resolution", "tasks"], "offsets": [23, 24, 25, 26]}], "trigger": {"text": "exploiting", "tokens": ["exploiting"], "offsets": [32]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [56]}, {"text": "corpus of over 8 , 000 annotated text passages with ambiguous pronominal anaphora", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["corpus", "of", "over", "8", ",", "000", "annotated", "text", "passages", "with", "ambiguous", "pronominal", "anaphora"], "offsets": [59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [57]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": 
"OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [81]}, {"text": "perform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["perform"], "offsets": [100]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [82]}}, {"event_type": "CMP", "arguments": [{"text": "various coreference systems", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["various", "coreference", "systems"], "offsets": [84, 85, 86]}, {"text": "humans", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["humans"], "offsets": [107]}, {"text": "on the task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "task"], "offsets": [103, 104, 105]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [101]}, {"text": "worse", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["worse"], "offsets": [102]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [100]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [123]}, {"text": "relying", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["relying"], "offsets": [141]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [124]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the art models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["state", "-", "of", "-", "the", "art", "models"], "offsets": [127, 128, 129, 130, 131, 132, 133]}, {"text": "gender or number of candidate antecedents", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["gender", "or", "number", "of", "candidate", "antecedents"], "offsets": [144, 145, 146, 147, 148, 149]}], "trigger": {"text": "relying", "tokens": ["relying"], "offsets": [141]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [155]}, {"text": 
"problem - specific insights", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["problem", "-", "specific", "insights"], "offsets": [158, 159, 160, 161]}, {"text": "propose", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["propose"], "offsets": [163]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [157]}}, {"event_type": "PUR", "arguments": [{"text": "data - augmentation trick", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["data", "-", "augmentation", "trick"], "offsets": [165, 166, 167, 168]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [163]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [181]}, {"text": "yields", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["yields"], "offsets": [186]}, {"text": "achieve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieve"], "offsets": [199]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [182]}}, {"event_type": "FAC", "arguments": [{"text": "promising results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["promising", "results"], "offsets": [187, 188]}, {"text": "antecedent switching", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["antecedent", "switching"], "offsets": [184, 185]}], "trigger": {"text": "yields", "tokens": ["yields"], "offsets": [186]}}, {"event_type": "FAC", "arguments": [{"text": "antecedent switching", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["antecedent", "switching"], "offsets": [184, 185]}, {"text": "state - of - the - art", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [200, 201, 202, 203, 204, 205, 206]}, {"text": "results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["results"], "offsets": [207]}, {"text": "on the gap coreference task", "nugget_type": "LIM", 
"argument_type": "Condition", "tokens": ["on", "the", "gap", "coreference", "task"], "offsets": [208, 209, 210, 211, 212]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [199]}}], "document": ["we", "introduce", "a", "new", "benchmark", "for", "coreference", "resolution", "and", "nli", ",", "knowref", ",", "that", "targets", "common", "-", "sense", "understanding", "and", "world", "knowledge", ".", "previous", "coreference", "resolution", "tasks", "can", "largely", "be", "solved", "by", "exploiting", "the", "number", "and", "gender", "of", "the", "antecedents", ",", "or", "have", "been", "handcrafted", "and", "do", "not", "reflect", "the", "diversity", "of", "naturally", "occurring", "text", ".", "we", "present", "a", "corpus", "of", "over", "8", ",", "000", "annotated", "text", "passages", "with", "ambiguous", "pronominal", "anaphora", ".", "these", "instances", "are", "both", "challenging", "and", "realistic", ".", "we", "show", "that", "various", "coreference", "systems", ",", "whether", "rule", "-", "based", ",", "feature", "-", "rich", ",", "or", "neural", ",", "perform", "significantly", "worse", "on", "the", "task", "than", "humans", ",", "who", "display", "high", "inter", "-", "annotator", "agreement", ".", "to", "explain", "this", "performance", "gap", ",", "we", "show", "empirically", "that", "state", "-", "of", "-", "the", "art", "models", "often", "fail", "to", "capture", "context", ",", "instead", "relying", "on", "the", "gender", "or", "number", "of", "candidate", "antecedents", "to", "make", "a", "decision", ".", "we", "then", "use", "problem", "-", "specific", "insights", "to", "propose", "a", "data", "-", "augmentation", "trick", "called", "antecedent", "switching", "to", "alleviate", "this", "tendency", "in", "models", ".", "finally", ",", "we", "show", "that", "antecedent", "switching", "yields", "promising", "results", "on", "other", "tasks", "as", "well", ":", "we", "use", "it", "to", "achieve", "state", "-", "of", "-", 
"the", "-", "art", "results", "on", "the", "gap", "coreference", "task", "."]}, {"venue": "ACL", "title": "DiBiMT: A Novel Benchmark for Measuring Word Sense Disambiguation Biases in Machine Translation", "abstract": "Lexical ambiguity poses one of the greatest challenges in the field of Machine Translation. Over the last few decades, multiple efforts have been undertaken to investigate incorrect translations caused by the polysemous nature of words. Within this body of research, some studies have posited that models pick up semantic biases existing in the training data, thus producing translation errors. In this paper, we present DiBiMT, the first entirely manually-curated evaluation benchmark which enables an extensive study of semantic biases in Machine Translation of nominal and verbal words in five different language combinations, namely, English and one or other of the following languages: Chinese, German, Italian, Russian and Spanish. Furthermore, we test state-of-the-art Machine Translation systems, both commercial and non-commercial ones, against our new test bed and provide a thorough statistical and linguistic analysis of the results. 
We release DiBiMT at https://nlp.uniroma1.it/dibimt as a closed benchmark with a public leaderboard.", "doc_id": "16720bf74d3bf51fe17c651663696062", "publication_year": 2022, "sentences": ["lexical ambiguity poses one of the greatest challenges in the field of machine translation .", "over the last few decades , multiple efforts have been undertaken to investigate incorrect translations caused by the polysemous nature of words .", "within this body of research , some studies have posited that models pick up semantic biases existing in the training data , thus producing translation errors .", "in this paper , we present dibimt , the first entirely manually - curated evaluation benchmark which enables an extensive study of semantic biases in machine translation of nominal and verbal words in five different language combinations , namely , english and one or other of the following languages : chinese , german , italian , russian and spanish .", "furthermore , we test state - of - the - art machine translation systems , both commercial and non - commercial ones , against our new test bed and provide a thorough statistical and linguistic analysis of the results .", "we release dibimt at https : / / nlp . uniroma1 . 
it / dibimt as a closed benchmark with a public leaderboard ."], "events": [{"event_type": "ITT", "arguments": [{"text": "lexical ambiguity", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["lexical", "ambiguity"], "offsets": [0, 1]}], "trigger": {"text": "poses", "tokens": ["poses"], "offsets": [2]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [69]}, {"text": "manually - curated evaluation benchmark", "nugget_type": "APP", "argument_type": "Content", "tokens": ["manually", "-", "curated", "evaluation", "benchmark"], "offsets": [76, 77, 78, 79, 80]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [70]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [127]}, {"text": "state - of - the - art machine translation systems", "nugget_type": "APP", "argument_type": "Content", "tokens": ["state", "-", "of", "-", "the", "-", "art", "machine", "translation", "systems"], "offsets": [129, 130, 131, 132, 133, 134, 135, 136, 137, 138]}, {"text": "against our new test bed", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["against", "our", "new", "test", "bed"], "offsets": [148, 149, 150, 151, 152]}], "trigger": {"text": "test", "tokens": ["test"], "offsets": [128]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [127]}, {"text": "thorough statistical and linguistic analysis of the results", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["thorough", "statistical", "and", "linguistic", "analysis", "of", "the", "results"], "offsets": [156, 157, 158, 159, 160, 161, 162, 163]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [154]}}], "document": ["lexical", "ambiguity", "poses", "one", "of", "the", "greatest", "challenges", "in", "the", "field", "of", 
"machine", "translation", ".", "over", "the", "last", "few", "decades", ",", "multiple", "efforts", "have", "been", "undertaken", "to", "investigate", "incorrect", "translations", "caused", "by", "the", "polysemous", "nature", "of", "words", ".", "within", "this", "body", "of", "research", ",", "some", "studies", "have", "posited", "that", "models", "pick", "up", "semantic", "biases", "existing", "in", "the", "training", "data", ",", "thus", "producing", "translation", "errors", ".", "in", "this", "paper", ",", "we", "present", "dibimt", ",", "the", "first", "entirely", "manually", "-", "curated", "evaluation", "benchmark", "which", "enables", "an", "extensive", "study", "of", "semantic", "biases", "in", "machine", "translation", "of", "nominal", "and", "verbal", "words", "in", "five", "different", "language", "combinations", ",", "namely", ",", "english", "and", "one", "or", "other", "of", "the", "following", "languages", ":", "chinese", ",", "german", ",", "italian", ",", "russian", "and", "spanish", ".", "furthermore", ",", "we", "test", "state", "-", "of", "-", "the", "-", "art", "machine", "translation", "systems", ",", "both", "commercial", "and", "non", "-", "commercial", "ones", ",", "against", "our", "new", "test", "bed", "and", "provide", "a", "thorough", "statistical", "and", "linguistic", "analysis", "of", "the", "results", ".", "we", "release", "dibimt", "at", "https", ":", "/", "/", "nlp", ".", "uniroma1", ".", "it", "/", "dibimt", "as", "a", "closed", "benchmark", "with", "a", "public", "leaderboard", "."]}, {"venue": "ACL", "title": "MC\u02c62: Multi-perspective Convolutional Cube for Conversational Machine Reading Comprehension", "abstract": "Conversational machine reading comprehension (CMRC) extends traditional single-turn machine reading comprehension (MRC) by multi-turn interactions, which requires machines to consider the history of conversation. 
Most of models simply combine previous questions for conversation understanding and only employ recurrent neural networks (RNN) for reasoning. To comprehend context profoundly and efficiently from different perspectives, we propose a novel neural network model, Multi-perspective Convolutional Cube (MC\u02c62). We regard each conversation as a cube. 1D and 2D convolutions are integrated with RNN in our model. To avoid models previewing the next turn of conversation, we also extend causal convolution partially to 2D. Experiments on the Conversational Question Answering (CoQA) dataset show that our model achieves state-of-the-art results.", "doc_id": "6eaf0743b5c7d8ab8d7b8e44bf16467a", "publication_year": 2019, "sentences": ["conversational machine reading comprehension ( cmrc ) extends traditional single - turn machine reading comprehension ( mrc ) by multi - turn interactions , which requires machines to consider the history of conversation .", "most of models simply combine previous questions for conversation understanding and only employ recurrent neural networks ( rnn ) for reasoning .", "to comprehend context profoundly and efficiently from different perspectives , we propose a novel neural network model , multi - perspective convolutional cube ( [UNK] ) .", "we regard each conversation as a cube .", "1d and 2d convolutions are integrated with rnn in our model .", "to avoid models previewing the next turn of conversation , we also extend causal convolution partially to 2d . 
experiments on the conversational question answering ( coqa ) dataset show that our model achieves state - of - the - art results ."], "events": [{"event_type": "ITT", "arguments": [{"text": "conversational machine reading comprehension", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["conversational", "machine", "reading", "comprehension"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "extends", "tokens": ["extends"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "most of models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["most", "of", "models"], "offsets": [34, 35, 36]}, {"text": "simply combine", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["simply", "combine"], "offsets": [37, 38]}], "trigger": {"text": "simply combine", "tokens": ["simply", "combine"], "offsets": [37, 38]}}, {"event_type": "RWF", "arguments": [{"text": "most of models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["most", "of", "models"], "offsets": [34, 35, 36]}, {"text": "recurrent neural networks", "nugget_type": "APP", "argument_type": "Fault", "tokens": ["recurrent", "neural", "networks"], "offsets": [47, 48, 49]}, {"text": "only employ", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["only", "employ"], "offsets": [45, 46]}], "trigger": {"text": "only employ", "tokens": ["only", "employ"], "offsets": [45, 46]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [66]}, {"text": "multi - perspective convolutional cube", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "perspective", "convolutional", "cube"], "offsets": [74, 75, 76, 77, 78]}, {"text": "comprehend", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["comprehend"], "offsets": [57]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [67]}}, {"event_type": "PUR", "arguments": [{"text": "context", 
"nugget_type": "FEA", "argument_type": "Aim", "tokens": ["context"], "offsets": [58]}, {"text": "from different perspectives", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "different", "perspectives"], "offsets": [62, 63, 64]}], "trigger": {"text": "comprehend", "tokens": ["comprehend"], "offsets": [57]}}, {"event_type": "MDS", "arguments": [{"text": "each conversation", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["each", "conversation"], "offsets": [85, 86]}, {"text": "cube", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["cube"], "offsets": [89]}], "trigger": {"text": "regard", "tokens": ["regard"], "offsets": [84]}}, {"event_type": "MDS", "arguments": [{"text": "recurrent neural networks", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["recurrent", "neural", "networks"], "offsets": [47, 48, 49]}, {"text": "1d and 2d convolutions", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["1d", "and", "2d", "convolutions"], "offsets": [91, 92, 93, 94]}], "trigger": {"text": "integrated", "tokens": ["integrated"], "offsets": [96]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [113]}, {"text": "causal convolution partially", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["causal", "convolution", "partially"], "offsets": [116, 117, 118]}, {"text": "2d", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["2d"], "offsets": [120]}], "trigger": {"text": "extend", "tokens": ["extend"], "offsets": [115]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [136]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [132]}}, {"event_type": "FAC", "arguments": [{"text": "multi - perspective convolutional cube", "nugget_type": "APP", "argument_type": "Subject", "tokens": 
["multi", "-", "perspective", "convolutional", "cube"], "offsets": [74, 75, 76, 77, 78]}, {"text": "state - of - the - art results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [137, 138, 139, 140, 141, 142, 143, 144]}, {"text": "conversational question answering ( coqa ) dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["conversational", "question", "answering", "dataset"], "offsets": [125, 126, 127, 131]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [136]}}], "document": ["conversational", "machine", "reading", "comprehension", "(", "cmrc", ")", "extends", "traditional", "single", "-", "turn", "machine", "reading", "comprehension", "(", "mrc", ")", "by", "multi", "-", "turn", "interactions", ",", "which", "requires", "machines", "to", "consider", "the", "history", "of", "conversation", ".", "most", "of", "models", "simply", "combine", "previous", "questions", "for", "conversation", "understanding", "and", "only", "employ", "recurrent", "neural", "networks", "(", "rnn", ")", "for", "reasoning", ".", "to", "comprehend", "context", "profoundly", "and", "efficiently", "from", "different", "perspectives", ",", "we", "propose", "a", "novel", "neural", "network", "model", ",", "multi", "-", "perspective", "convolutional", "cube", "(", "[UNK]", ")", ".", "we", "regard", "each", "conversation", "as", "a", "cube", ".", "1d", "and", "2d", "convolutions", "are", "integrated", "with", "rnn", "in", "our", "model", ".", "to", "avoid", "models", "previewing", "the", "next", "turn", "of", "conversation", ",", "we", "also", "extend", "causal", "convolution", "partially", "to", "2d", ".", "experiments", "on", "the", "conversational", "question", "answering", "(", "coqa", ")", "dataset", "show", "that", "our", "model", "achieves", "state", "-", "of", "-", "the", "-", "art", "results", "."]}, {"venue": "ACL", "title": "Fine- and Coarse-Granularity Hybrid 
Self-Attention for Efficient BERT", "abstract": "Transformer-based pre-trained models, such as BERT, have shown extraordinary success in achieving state-of-the-art results in many natural language processing applications. However, deploying these models can be prohibitively costly, as the standard self-attention mechanism of the Transformer suffers from quadratic computational cost in the input sequence length. To confront this, we propose FCA, a fine- and coarse-granularity hybrid self-attention that reduces the computation cost through progressively shortening the computational sequence length in self-attention. Specifically, FCA conducts an attention-based scoring strategy to determine the informativeness of tokens at each layer. Then, the informative tokens serve as the fine-granularity computing units in self-attention and the uninformative tokens are replaced with one or several clusters as the coarse-granularity computing units in self-attention. Experiments on the standard GLUE benchmark show that BERT with FCA achieves 2x reduction in FLOPs over original BERT with <1% loss in accuracy. 
We show that FCA offers a significantly better trade-off between accuracy and FLOPs compared to prior methods.", "doc_id": "751474436403bd9188f8e20d0483bcae", "publication_year": 2022, "sentences": ["transformer - based pre - trained models , such as bert , have shown extraordinary success in achieving state - of - the - art results in many natural language processing applications .", "however , deploying these models can be prohibitively costly , as the standard self - attention mechanism of the transformer suffers from quadratic computational cost in the input sequence length .", "to confront this , we propose fca , a fine - and coarse - granularity hybrid self - attention that reduces the computation cost through progressively shortening the computational sequence length in self - attention .", "specifically , fca conducts an attention - based scoring strategy to determine the informativeness of tokens at each layer .", "then , the informative tokens serve as the fine - granularity computing units in self - attention and the uninformative tokens are replaced with one or several clusters as the coarse - granularity computing units in self - attention .", "experiments on the standard glue benchmark show that bert with fca achieves 2x reduction in flops over original bert with < 1 % loss in accuracy .", "we show that fca offers a significantly better trade - off between accuracy and flops compared to prior methods ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language processing applications", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "processing", "applications"], "offsets": [28, 29, 30, 31]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [13]}}, {"event_type": "RWF", "arguments": [{"text": "transformer - based pre - trained models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["transformer", "-", "based", "pre", "-", "trained", "models"], "offsets": [0, 1, 2, 3, 4, 5, 
6]}, {"text": "prohibitively costly", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["prohibitively", "costly"], "offsets": [40, 41]}], "trigger": {"text": "deploying", "tokens": ["deploying"], "offsets": [35]}}, {"event_type": "RWF", "arguments": [{"text": "standard self - attention mechanism of the transformer", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["standard", "self", "-", "attention", "mechanism", "of", "the", "transformer"], "offsets": [45, 46, 47, 48, 49, 50, 51, 52]}, {"text": "quadratic computational cost", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["quadratic", "computational", "cost"], "offsets": [55, 56, 57]}, {"text": "in the input sequence length", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "input", "sequence", "length"], "offsets": [58, 59, 60, 61, 62]}], "trigger": {"text": "suffers", "tokens": ["suffers"], "offsets": [53]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [68]}, {"text": "fine - and coarse - granularity hybrid self - attention", "nugget_type": "APP", "argument_type": "Content", "tokens": ["fine", "-", "and", "coarse", "-", "granularity", "hybrid", "self", "-", "attention"], "offsets": [73, 74, 75, 76, 77, 78, 79, 80, 81, 82]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [69]}}, {"event_type": "MDS", "arguments": [{"text": "reduces", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reduces"], "offsets": [84]}, {"text": "computational sequence length", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["computational", "sequence", "length"], "offsets": [92, 93, 94]}], "trigger": {"text": "progressively shortening", "tokens": ["progressively", "shortening"], "offsets": [89, 90]}}, {"event_type": "PUR", "arguments": [{"text": "computation cost", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["computation", 
"cost"], "offsets": [86, 87]}], "trigger": {"text": "reduces", "tokens": ["reduces"], "offsets": [84]}}, {"event_type": "MDS", "arguments": [{"text": "attention - based scoring strategy", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["attention", "-", "based", "scoring", "strategy"], "offsets": [105, 106, 107, 108, 109]}, {"text": "determine", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["determine"], "offsets": [111]}], "trigger": {"text": "conducts", "tokens": ["conducts"], "offsets": [103]}}, {"event_type": "PUR", "arguments": [{"text": "informativeness of tokens", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["informativeness", "of", "tokens"], "offsets": [113, 114, 115]}, {"text": "at each layer", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "each", "layer"], "offsets": [116, 117, 118]}], "trigger": {"text": "determine", "tokens": ["determine"], "offsets": [111]}}, {"event_type": "MDS", "arguments": [{"text": "fine - granularity computing units", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["fine", "-", "granularity", "computing", "units"], "offsets": [128, 129, 130, 131, 132]}, {"text": "informative tokens", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["informative", "tokens"], "offsets": [123, 124]}], "trigger": {"text": "serve", "tokens": ["serve"], "offsets": [125]}}, {"event_type": "MDS", "arguments": [{"text": "with one or several clusters", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "one", "or", "several", "clusters"], "offsets": [143, 144, 145, 146, 147]}, {"text": "coarse - granularity computing units", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["coarse", "-", "granularity", "computing", "units"], "offsets": [150, 151, 152, 153, 154]}, {"text": "uninformative tokens", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["uninformative", "tokens"], "offsets": [139, 
140]}], "trigger": {"text": "replaced", "tokens": ["replaced"], "offsets": [142]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [171]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [166]}}, {"event_type": "CMP", "arguments": [{"text": "bert with fca", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["bert", "with", "fca"], "offsets": [168, 169, 170]}, {"text": "2x", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["2x"], "offsets": [172]}, {"text": "original bert", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["original", "bert"], "offsets": [177, 178]}, {"text": "reduction", "nugget_type": "STR", "argument_type": "Result", "tokens": ["reduction"], "offsets": [173]}, {"text": "with < 1 % loss in accuracy", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "<", "1", "%", "loss", "in", "accuracy"], "offsets": [179, 180, 181, 182, 183, 184, 185]}, {"text": "flops", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["flops"], "offsets": [175]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [171]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [187]}, {"text": "offers", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["offers"], "offsets": [191]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [188]}}, {"event_type": "CMP", "arguments": [{"text": "fca", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["fca"], "offsets": [190]}, {"text": "prior methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["prior", "methods"], "offsets": [204, 205]}, {"text": "significantly better trade - off", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "better", "trade", "-", "off"], "offsets": [193, 194, 195, 196, 197]}, {"text": 
"accuracy and flops", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["accuracy", "and", "flops"], "offsets": [199, 200, 201]}], "trigger": {"text": "offers", "tokens": ["offers"], "offsets": [191]}}], "document": ["transformer", "-", "based", "pre", "-", "trained", "models", ",", "such", "as", "bert", ",", "have", "shown", "extraordinary", "success", "in", "achieving", "state", "-", "of", "-", "the", "-", "art", "results", "in", "many", "natural", "language", "processing", "applications", ".", "however", ",", "deploying", "these", "models", "can", "be", "prohibitively", "costly", ",", "as", "the", "standard", "self", "-", "attention", "mechanism", "of", "the", "transformer", "suffers", "from", "quadratic", "computational", "cost", "in", "the", "input", "sequence", "length", ".", "to", "confront", "this", ",", "we", "propose", "fca", ",", "a", "fine", "-", "and", "coarse", "-", "granularity", "hybrid", "self", "-", "attention", "that", "reduces", "the", "computation", "cost", "through", "progressively", "shortening", "the", "computational", "sequence", "length", "in", "self", "-", "attention", ".", "specifically", ",", "fca", "conducts", "an", "attention", "-", "based", "scoring", "strategy", "to", "determine", "the", "informativeness", "of", "tokens", "at", "each", "layer", ".", "then", ",", "the", "informative", "tokens", "serve", "as", "the", "fine", "-", "granularity", "computing", "units", "in", "self", "-", "attention", "and", "the", "uninformative", "tokens", "are", "replaced", "with", "one", "or", "several", "clusters", "as", "the", "coarse", "-", "granularity", "computing", "units", "in", "self", "-", "attention", ".", "experiments", "on", "the", "standard", "glue", "benchmark", "show", "that", "bert", "with", "fca", "achieves", "2x", "reduction", "in", "flops", "over", "original", "bert", "with", "<", "1", "%", "loss", "in", "accuracy", ".", "we", "show", "that", "fca", "offers", "a", "significantly", "better", "trade", "-", "off", "between", 
"accuracy", "and", "flops", "compared", "to", "prior", "methods", "."]}, {"venue": "ACL", "title": "Reversing Gradients in Adversarial Domain Adaptation for Question Deduplication and Textual Entailment Tasks", "abstract": "Adversarial domain adaptation has been recently proposed as an effective technique for textual matching tasks, such as question deduplication. Here we investigate the use of gradient reversal on adversarial domain adaptation to explicitly learn both shared and unshared (domain specific) representations between two textual domains. In doing so, gradient reversal learns features that explicitly compensate for domain mismatch, while still distilling domain specific knowledge that can improve target domain accuracy. We evaluate reversing gradients for adversarial adaptation on multiple domains, and demonstrate that it significantly outperforms other methods on question deduplication as well as on recognizing textual entailment (RTE) tasks, achieving up to 7% absolute boost in base model accuracy on some datasets.", "doc_id": "e69e1dbca6f8e89b2ae97f0ab2443e0c", "publication_year": 2019, "sentences": ["adversarial domain adaptation has been recently proposed as an effective technique for textual matching tasks , such as question deduplication .", "here we investigate the use of gradient reversal on adversarial domain adaptation to explicitly learn both shared and unshared ( domain specific ) representations between two textual domains .", "in doing so , gradient reversal learns features that explicitly compensate for domain mismatch , while still distilling domain specific knowledge that can improve target domain accuracy .", "we evaluate reversing gradients for adversarial adaptation on multiple domains , and demonstrate that it significantly outperforms other methods on question deduplication as well as on recognizing textual entailment ( rte ) tasks , achieving up to 7 % absolute boost in base model accuracy on some datasets ."], "events": 
[{"event_type": "ITT", "arguments": [{"text": "textual matching tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["textual", "matching", "tasks"], "offsets": [12, 13, 14]}], "trigger": {"text": "technique", "tokens": ["technique"], "offsets": [10]}}, {"event_type": "MDS", "arguments": [{"text": "between two textual domains", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "two", "textual", "domains"], "offsets": [45, 46, 47, 48]}, {"text": "shared representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["shared", "representations"], "offsets": [37, 44]}, {"text": "unshared representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["unshared", "representations"], "offsets": [39, 44]}], "trigger": {"text": "explicitly learn", "tokens": ["explicitly", "learn"], "offsets": [34, 35]}}, {"event_type": "MDS", "arguments": [{"text": "gradient reversal", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["gradient", "reversal"], "offsets": [54, 55]}, {"text": "features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["features"], "offsets": [57]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [56]}}, {"event_type": "MDS", "arguments": [{"text": "domain specific knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["domain", "specific", "knowledge"], "offsets": [68, 69, 70]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [73]}], "trigger": {"text": "distilling", "tokens": ["distilling"], "offsets": [67]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [78]}, {"text": "reversing gradients", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["reversing", "gradients"], "offsets": [80, 81]}, {"text": "on multiple domains", "nugget_type": "LIM", 
"argument_type": "Condition", "tokens": ["on", "multiple", "domains"], "offsets": [85, 86, 87]}, {"text": "adversarial adaptation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["adversarial", "adaptation"], "offsets": [83, 84]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [79]}}, {"event_type": "FIN", "arguments": [{"text": "significantly outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["significantly", "outperforms"], "offsets": [93, 94]}, {"text": "achieving", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieving"], "offsets": [112]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [90]}}, {"event_type": "CMP", "arguments": [{"text": "other methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["other", "methods"], "offsets": [95, 96]}, {"text": "question deduplication", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["question", "deduplication"], "offsets": [98, 99]}, {"text": "recognizing textual entailment ( rte ) tasks", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["recognizing", "textual", "entailment", "tasks"], "offsets": [104, 105, 106, 110]}, {"text": "significantly outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "outperforms"], "offsets": [93, 94]}], "trigger": {"text": "significantly outperforms", "tokens": ["significantly", "outperforms"], "offsets": [93, 94]}}, {"event_type": "FAC", "arguments": [{"text": "7 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["7", "%"], "offsets": [115, 116]}, {"text": "base model accuracy", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["base", "model", "accuracy"], "offsets": [120, 121, 122]}, {"text": "absolute boost", "nugget_type": "STR", "argument_type": "Object", "tokens": ["absolute", "boost"], "offsets": [117, 118]}, {"text": "some datasets", "nugget_type": "DST", "argument_type": 
"Dataset", "tokens": ["some", "datasets"], "offsets": [124, 125]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [112]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [22]}, {"text": "use of gradient reversal", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["use", "of", "gradient", "reversal"], "offsets": [25, 26, 27, 28]}, {"text": "adversarial domain adaptation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["adversarial", "domain", "adaptation"], "offsets": [30, 31, 32]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [23]}}, {"event_type": "PUR", "arguments": [{"text": "target domain accuracy", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["target", "domain", "accuracy"], "offsets": [74, 75, 76]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [73]}}], "document": ["adversarial", "domain", "adaptation", "has", "been", "recently", "proposed", "as", "an", "effective", "technique", "for", "textual", "matching", "tasks", ",", "such", "as", "question", "deduplication", ".", "here", "we", "investigate", "the", "use", "of", "gradient", "reversal", "on", "adversarial", "domain", "adaptation", "to", "explicitly", "learn", "both", "shared", "and", "unshared", "(", "domain", "specific", ")", "representations", "between", "two", "textual", "domains", ".", "in", "doing", "so", ",", "gradient", "reversal", "learns", "features", "that", "explicitly", "compensate", "for", "domain", "mismatch", ",", "while", "still", "distilling", "domain", "specific", "knowledge", "that", "can", "improve", "target", "domain", "accuracy", ".", "we", "evaluate", "reversing", "gradients", "for", "adversarial", "adaptation", "on", "multiple", "domains", ",", "and", "demonstrate", "that", "it", "significantly", "outperforms", "other", "methods", "on", "question", "deduplication", "as", "well", "as", "on", 
"recognizing", "textual", "entailment", "(", "rte", ")", "tasks", ",", "achieving", "up", "to", "7", "%", "absolute", "boost", "in", "base", "model", "accuracy", "on", "some", "datasets", "."]}, {"venue": "ACL", "title": "Pre-training and Fine-tuning Neural Topic Model: A Simple yet Effective Approach to Incorporating External Knowledge", "abstract": "Recent years have witnessed growing interests in incorporating external knowledge such as pre-trained word embeddings (PWEs) or pre-trained language models (PLMs) into neural topic modeling. However, we found that employing PWEs and PLMs for topic modeling only achieved limited performance improvements but with huge computational overhead. In this paper, we propose a novel strategy to incorporate external knowledge into neural topic modeling where the neural topic model is pre-trained on a large corpus and then fine-tuned on the target dataset. Experiments have been conducted on three datasets and results show that the proposed approach significantly outperforms both current state-of-the-art neural topic models and some topic modeling approaches enhanced with PWEs or PLMs. 
Moreover, further study shows that the proposed approach greatly reduces the need for the huge size of training data.", "doc_id": "e577075d1ddea0ce3756ef596ba930ff", "publication_year": 2022, "sentences": ["recent years have witnessed growing interests in incorporating external knowledge such as pre - trained word embeddings ( pwes ) or pre - trained language models ( plms ) into neural topic modeling .", "however , we found that employing pwes and plms for topic modeling only achieved limited performance improvements but with huge computational overhead .", "in this paper , we propose a novel strategy to incorporate external knowledge into neural topic modeling where the neural topic model is pre - trained on a large corpus and then fine - tuned on the target dataset .", "experiments have been conducted on three datasets and results show that the proposed approach significantly outperforms both current state - of - the - art neural topic models and some topic modeling approaches enhanced with pwes or plms .", "moreover , further study shows that the proposed approach greatly reduces the need for the huge size of training data ."], "events": [{"event_type": "RWF", "arguments": [{"text": "only achieved", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["only", "achieved"], "offsets": [46, 47]}], "trigger": {"text": "only achieved", "tokens": ["only", "achieved"], "offsets": [46, 47]}}, {"event_type": "RWF", "arguments": [], "trigger": {"text": "huge computational overhead", "tokens": ["huge", "computational", "overhead"], "offsets": [53, 54, 55]}}, {"event_type": "RWS", "arguments": [{"text": "pre - trained word embeddings", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pre", "-", "trained", "word", "embeddings"], "offsets": [12, 13, 14, 15, 16]}, {"text": "pre - trained language models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pre", "-", "trained", "language", "models"], "offsets": [21, 22, 23, 24, 25]}, 
{"text": "topic modeling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["topic", "modeling"], "offsets": [44, 45]}], "trigger": {"text": "employing", "tokens": ["employing"], "offsets": [39]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [61]}, {"text": "strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["strategy"], "offsets": [65]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [62]}}, {"event_type": "MDS", "arguments": [{"text": "external knowledge", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["external", "knowledge"], "offsets": [68, 69]}, {"text": "neural topic modeling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["neural", "topic", "modeling"], "offsets": [71, 72, 73]}], "trigger": {"text": "incorporate", "tokens": ["incorporate"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "large corpus", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["large", "corpus"], "offsets": [85, 86]}, {"text": "neural topic model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["neural", "topic", "model"], "offsets": [76, 77, 78]}], "trigger": {"text": "pre - trained", "tokens": ["pre", "-", "trained"], "offsets": [80, 81, 82]}}, {"event_type": "MDS", "arguments": [{"text": "target dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["target", "dataset"], "offsets": [94, 95]}, {"text": "neural topic model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["neural", "topic", "model"], "offsets": [76, 77, 78]}], "trigger": {"text": "fine - tuned", "tokens": ["fine", "-", "tuned"], "offsets": [89, 90, 91]}}, {"event_type": "WKS", "arguments": [{"text": "experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["experiments"], "offsets": [97]}, {"text": "three datasets", "nugget_type": "DST", 
"argument_type": "Dataset", "tokens": ["three", "datasets"], "offsets": [102, 103]}], "trigger": {"text": "conducted", "tokens": ["conducted"], "offsets": [100]}}, {"event_type": "CMP", "arguments": [{"text": "strategy", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["strategy"], "offsets": [65]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [111]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [112]}, {"text": "current state - of - the - art neural topic models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "state", "-", "of", "-", "the", "-", "art", "neural", "topic", "models"], "offsets": [114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [112]}}, {"event_type": "FAC", "arguments": [{"text": "strategy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["strategy"], "offsets": [65]}, {"text": "greatly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["greatly"], "offsets": [145]}, {"text": "need for the huge size of training data", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["need", "for", "the", "huge", "size", "of", "training", "data"], "offsets": [148, 149, 150, 151, 152, 153, 154, 155]}], "trigger": {"text": "reduces", "tokens": ["reduces"], "offsets": [146]}}], "document": ["recent", "years", "have", "witnessed", "growing", "interests", "in", "incorporating", "external", "knowledge", "such", "as", "pre", "-", "trained", "word", "embeddings", "(", "pwes", ")", "or", "pre", "-", "trained", "language", "models", "(", "plms", ")", "into", "neural", "topic", "modeling", ".", "however", ",", "we", "found", "that", "employing", "pwes", "and", "plms", "for", "topic", "modeling", "only", "achieved", "limited", "performance", "improvements", "but", "with", "huge", "computational", 
"overhead", ".", "in", "this", "paper", ",", "we", "propose", "a", "novel", "strategy", "to", "incorporate", "external", "knowledge", "into", "neural", "topic", "modeling", "where", "the", "neural", "topic", "model", "is", "pre", "-", "trained", "on", "a", "large", "corpus", "and", "then", "fine", "-", "tuned", "on", "the", "target", "dataset", ".", "experiments", "have", "been", "conducted", "on", "three", "datasets", "and", "results", "show", "that", "the", "proposed", "approach", "significantly", "outperforms", "both", "current", "state", "-", "of", "-", "the", "-", "art", "neural", "topic", "models", "and", "some", "topic", "modeling", "approaches", "enhanced", "with", "pwes", "or", "plms", ".", "moreover", ",", "further", "study", "shows", "that", "the", "proposed", "approach", "greatly", "reduces", "the", "need", "for", "the", "huge", "size", "of", "training", "data", "."]}, {"venue": "ACL", "title": "Exploring the Representation of Word Meanings in Context: A Case Study on Homonymy and Synonymy", "abstract": "This paper presents a multilingual study of word meaning representations in context. We assess the ability of both static and contextualized models to adequately represent different lexical-semantic relations, such as homonymy and synonymy. To do so, we created a new multilingual dataset that allows us to perform a controlled evaluation of several factors such as the impact of the surrounding context or the overlap between words, conveying the same or different senses. A systematic assessment on four scenarios shows that the best monolingual models based on Transformers can adequately disambiguate homonyms in context. However, as they rely heavily on context, these models fail at representing words with different senses when occurring in similar sentences. 
Experiments are performed in Galician, Portuguese, English, and Spanish, and both the dataset (with more than 3,000 evaluation items) and new models are freely released with this study.", "doc_id": "0731fbe65ed75ca4eba406fda225cf67", "publication_year": 2021, "sentences": ["this paper presents a multilingual study of word meaning representations in context .", "we assess the ability of both static and contextualized models to adequately represent different lexical - semantic relations , such as homonymy and synonymy .", "to do so , we created a new multilingual dataset that allows us to perform a controlled evaluation of several factors such as the impact of the surrounding context or the overlap between words , conveying the same or different senses .", "a systematic assessment on four scenarios shows that the best monolingual models based on transformers can adequately disambiguate homonyms in context .", "however , as they rely heavily on context , these models fail at representing words with different senses when occurring in similar sentences .", "experiments are performed in galician , portuguese , english , and spanish , and both the dataset ( with more than 3 , 000 evaluation items ) and new models are freely released with this study ."], "events": [{"event_type": "ITT", "arguments": [{"text": "word meaning representations", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["word", "meaning", "representations"], "offsets": [7, 8, 9]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [5]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [13]}, {"text": "adequately represent", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["adequately", "represent"], "offsets": [24, 25]}, {"text": "ability of both static models", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["ability", "of", "both", "static", "models"], "offsets": [16, 17, 
18, 19, 22]}, {"text": "contextualized models", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["contextualized", "models"], "offsets": [21, 22]}], "trigger": {"text": "assess", "tokens": ["assess"], "offsets": [14]}}, {"event_type": "PUR", "arguments": [{"text": "different lexical - semantic relations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["different", "lexical", "-", "semantic", "relations"], "offsets": [26, 27, 28, 29, 30]}], "trigger": {"text": "adequately represent", "tokens": ["adequately", "represent"], "offsets": [24, 25]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [42]}, {"text": "multilingual dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["multilingual", "dataset"], "offsets": [46, 47]}], "trigger": {"text": "created", "tokens": ["created"], "offsets": [43]}}, {"event_type": "FIN", "arguments": [{"text": "adequately disambiguate", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["adequately", "disambiguate"], "offsets": [96, 97]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [86]}}, {"event_type": "FAC", "arguments": [{"text": "best monolingual models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["best", "monolingual", "models"], "offsets": [89, 90, 91]}, {"text": "homonyms in context", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["homonyms", "in", "context"], "offsets": [98, 99, 100]}], "trigger": {"text": "adequately disambiguate", "tokens": ["adequately", "disambiguate"], "offsets": [96, 97]}}, {"event_type": "RWF", "arguments": [], "trigger": {"text": "fail", "tokens": ["fail"], "offsets": [113]}}, {"event_type": "WKS", "arguments": [{"text": "in galician , portuguese , english , and spanish", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["in", "galician", ",", "portuguese", ",", "english", ",", "and", "spanish"], "offsets": [129, 
130, 131, 132, 133, 134, 135, 136, 137]}], "trigger": {"text": "performed", "tokens": ["performed"], "offsets": [128]}}, {"event_type": "WKS", "arguments": [{"text": "new models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["new", "models"], "offsets": [154, 155]}, {"text": "dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset"], "offsets": [142]}], "trigger": {"text": "freely released", "tokens": ["freely", "released"], "offsets": [157, 158]}}], "document": ["this", "paper", "presents", "a", "multilingual", "study", "of", "word", "meaning", "representations", "in", "context", ".", "we", "assess", "the", "ability", "of", "both", "static", "and", "contextualized", "models", "to", "adequately", "represent", "different", "lexical", "-", "semantic", "relations", ",", "such", "as", "homonymy", "and", "synonymy", ".", "to", "do", "so", ",", "we", "created", "a", "new", "multilingual", "dataset", "that", "allows", "us", "to", "perform", "a", "controlled", "evaluation", "of", "several", "factors", "such", "as", "the", "impact", "of", "the", "surrounding", "context", "or", "the", "overlap", "between", "words", ",", "conveying", "the", "same", "or", "different", "senses", ".", "a", "systematic", "assessment", "on", "four", "scenarios", "shows", "that", "the", "best", "monolingual", "models", "based", "on", "transformers", "can", "adequately", "disambiguate", "homonyms", "in", "context", ".", "however", ",", "as", "they", "rely", "heavily", "on", "context", ",", "these", "models", "fail", "at", "representing", "words", "with", "different", "senses", "when", "occurring", "in", "similar", "sentences", ".", "experiments", "are", "performed", "in", "galician", ",", "portuguese", ",", "english", ",", "and", "spanish", ",", "and", "both", "the", "dataset", "(", "with", "more", "than", "3", ",", "000", "evaluation", "items", ")", "and", "new", "models", "are", "freely", "released", "with", "this", "study", "."]}, {"venue": "ACL", "title": 
"Pretraining with Contrastive Sentence Objectives Improves Discourse Performance of Language Models", "abstract": "Recent models for unsupervised representation learning of text have employed a number of techniques to improve contextual word representations but have put little focus on discourse-level representations. We propose Conpono, an inter-sentence objective for pretraining language models that models discourse coherence and the distance between sentences. Given an anchor sentence, our model is trained to predict the text k sentences away using a sampled-softmax objective where the candidates consist of neighboring sentences and sentences randomly sampled from the corpus. On the discourse representation benchmark DiscoEval, our model improves over the previous state-of-the-art by up to 13% and on average 4% absolute across 7 tasks. Our model is the same size as BERT-Base, but outperforms the much larger BERT-Large model and other more recent approaches that incorporate discourse. 
We also show that Conpono yields gains of 2%-6% absolute even for tasks that do not explicitly evaluate discourse: textual entailment (RTE), common sense reasoning (COPA) and reading comprehension (ReCoRD).", "doc_id": "fcb9915b031851caf926ec4bb1420fa2", "publication_year": 2020, "sentences": ["recent models for unsupervised representation learning of text have employed a number of techniques to improve contextual word representations but have put little focus on discourse - level representations .", "we propose conpono , an inter - sentence objective for pretraining language models that models discourse coherence and the distance between sentences .", "given an anchor sentence , our model is trained to predict the text k sentences away using a sampled - softmax objective where the candidates consist of neighboring sentences and sentences randomly sampled from the corpus .", "on the discourse representation benchmark discoeval , our model improves over the previous state - of - the - art by up to 13 % and on average 4 % absolute across 7 tasks .", "our model is the same size as bert - base , but outperforms the much larger bert - large model and other more recent approaches that incorporate discourse .", "we also show that conpono yields gains of 2 % - 6 % absolute even for tasks that do not explicitly evaluate discourse : textual entailment ( rte ) , common sense reasoning ( copa ) and reading comprehension ( record ) ."], "events": [{"event_type": "RWS", "arguments": [{"text": "recent models for unsupervised representation learning of text", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["recent", "models", "for", "unsupervised", "representation", "learning", "of", "text"], "offsets": [0, 1, 2, 3, 4, 5, 6, 7]}, {"text": "number of techniques", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["number", "of", "techniques"], "offsets": [11, 12, 13]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": 
["improve"], "offsets": [15]}], "trigger": {"text": "employed", "tokens": ["employed"], "offsets": [9]}}, {"event_type": "PUR", "arguments": [{"text": "contextual word representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["contextual", "word", "representations"], "offsets": [16, 17, 18]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [15]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [30]}, {"text": "models", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["models"], "offsets": [44]}, {"text": "inter - sentence objective for pretraining language models", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["inter", "-", "sentence", "objective", "for", "pretraining", "language", "models"], "offsets": [35, 36, 37, 38, 39, 40, 41, 42]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [31]}}, {"event_type": "PUR", "arguments": [{"text": "discourse coherence", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["discourse", "coherence"], "offsets": [45, 46]}, {"text": "distance between sentences", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["distance", "between", "sentences"], "offsets": [49, 50, 51]}], "trigger": {"text": "models", "tokens": ["models"], "offsets": [44]}}, {"event_type": "MDS", "arguments": [{"text": "given an anchor sentence", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["given", "an", "anchor", "sentence"], "offsets": [53, 54, 55, 56]}, {"text": "sampled - softmax objective", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["sampled", "-", "softmax", "objective"], "offsets": [71, 72, 73, 74]}, {"text": "our model", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["our", "model"], "offsets": [58, 59]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [63]}], 
"trigger": {"text": "using", "tokens": ["using"], "offsets": [69]}}, {"event_type": "PUR", "arguments": [{"text": "text k sentences", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["text", "k", "sentences"], "offsets": [65, 66, 67]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [63]}}, {"event_type": "CMP", "arguments": [{"text": "on the discourse representation benchmark discoeval", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "discourse", "representation", "benchmark", "discoeval"], "offsets": [90, 91, 92, 93, 94, 95]}, {"text": "our model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["our", "model"], "offsets": [97, 98]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [99]}, {"text": "previous state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art"], "offsets": [102, 103, 104, 105, 106, 107, 108, 109]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [99]}}, {"event_type": "CMP", "arguments": [{"text": "on the discourse representation benchmark discoeval", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "discourse", "representation", "benchmark", "discoeval"], "offsets": [90, 91, 92, 93, 94, 95]}, {"text": "our model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["our", "model"], "offsets": [97, 98]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [99]}, {"text": "previous state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art"], "offsets": [102, 103, 104, 105, 106, 107, 108, 109]}, {"text": "on average 4 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["on", "average", "4", "%"], "offsets": [116, 117, 118, 119]}, {"text": "absolute", 
"nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["absolute"], "offsets": [120]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [99]}}, {"event_type": "CMP", "arguments": [{"text": "our model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["our", "model"], "offsets": [125, 126]}, {"text": "same size", "nugget_type": "STR", "argument_type": "Result", "tokens": ["same", "size"], "offsets": [129, 130]}], "trigger": {"text": "same size", "tokens": ["same", "size"], "offsets": [129, 130]}}, {"event_type": "CMP", "arguments": [{"text": "our model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["our", "model"], "offsets": [125, 126]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [137]}, {"text": "much larger bert - large model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["much", "larger", "bert", "-", "large", "model"], "offsets": [139, 140, 141, 142, 143, 144]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [137]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [154]}, {"text": "yields", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["yields"], "offsets": [159]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [156]}}, {"event_type": "FAC", "arguments": [{"text": "conpono", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["conpono"], "offsets": [158]}, {"text": "gains", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["gains"], "offsets": [160]}, {"text": "2 % - 6 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["2", "%", "-", "6", "%"], "offsets": [162, 163, 164, 165, 166]}, {"text": "even for tasks that do not explicitly evaluate discourse", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["even", "for", "tasks", "that", "do", "not", "explicitly", 
"evaluate", "discourse"], "offsets": [168, 169, 170, 171, 172, 173, 174, 175, 176]}], "trigger": {"text": "yields", "tokens": ["yields"], "offsets": [159]}}], "document": ["recent", "models", "for", "unsupervised", "representation", "learning", "of", "text", "have", "employed", "a", "number", "of", "techniques", "to", "improve", "contextual", "word", "representations", "but", "have", "put", "little", "focus", "on", "discourse", "-", "level", "representations", ".", "we", "propose", "conpono", ",", "an", "inter", "-", "sentence", "objective", "for", "pretraining", "language", "models", "that", "models", "discourse", "coherence", "and", "the", "distance", "between", "sentences", ".", "given", "an", "anchor", "sentence", ",", "our", "model", "is", "trained", "to", "predict", "the", "text", "k", "sentences", "away", "using", "a", "sampled", "-", "softmax", "objective", "where", "the", "candidates", "consist", "of", "neighboring", "sentences", "and", "sentences", "randomly", "sampled", "from", "the", "corpus", ".", "on", "the", "discourse", "representation", "benchmark", "discoeval", ",", "our", "model", "improves", "over", "the", "previous", "state", "-", "of", "-", "the", "-", "art", "by", "up", "to", "13", "%", "and", "on", "average", "4", "%", "absolute", "across", "7", "tasks", ".", "our", "model", "is", "the", "same", "size", "as", "bert", "-", "base", ",", "but", "outperforms", "the", "much", "larger", "bert", "-", "large", "model", "and", "other", "more", "recent", "approaches", "that", "incorporate", "discourse", ".", "we", "also", "show", "that", "conpono", "yields", "gains", "of", "2", "%", "-", "6", "%", "absolute", "even", "for", "tasks", "that", "do", "not", "explicitly", "evaluate", "discourse", ":", "textual", "entailment", "(", "rte", ")", ",", "common", "sense", "reasoning", "(", "copa", ")", "and", "reading", "comprehension", "(", "record", ")", "."]}, {"venue": "ACL", "title": "Evaluating Evaluation Measures for Ordinal Classification and Ordinal 
Quantification", "abstract": "Ordinal Classification (OC) is an important classification task where the classes are ordinal. For example, an OC task for sentiment analysis could have the following classes: highly positive, positive, neutral, negative, highly negative. Clearly, evaluation measures for an OC task should penalise misclassifications by considering the ordinal nature of the classes. Ordinal Quantification (OQ) is a related task where the gold data is a distribution over ordinal classes, and the system is required to estimate this distribution. Evaluation measures for an OQ task should also take the ordinal nature of the classes into account. However, for both OC and OQ, there are only a small number of known evaluation measures that meet this basic requirement. In the present study, we utilise data from the SemEval and NTCIR communities to clarify the properties of nine evaluation measures in the context of OC tasks, and six measures in the context of OQ tasks.", "doc_id": "8a3b219e3ffbfe809c2716faa1c5ab6f", "publication_year": 2021, "sentences": ["ordinal classification ( oc ) is an important classification task where the classes are ordinal .", "for example , an oc task for sentiment analysis could have the following classes : highly positive , positive , neutral , negative , highly negative .", "clearly , evaluation measures for an oc task should penalise misclassifications by considering the ordinal nature of the classes .", "ordinal quantification ( oq ) is a related task where the gold data is a distribution over ordinal classes , and the system is required to estimate this distribution .", "evaluation measures for an oq task should also take the ordinal nature of the classes into account .", "however , for both oc and oq , there are only a small number of known evaluation measures that meet this basic requirement .", "in the present study , we utilise data from the semeval and ntcir communities to clarify the properties of nine evaluation 
measures in the context of oc tasks , and six measures in the context of oq tasks ."], "events": [{"event_type": "ITT", "arguments": [{"text": "ordinal classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["ordinal", "classification"], "offsets": [0, 1]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [9]}}, {"event_type": "ITT", "arguments": [{"text": "ordinal quantification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["ordinal", "quantification"], "offsets": [63, 64]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [71]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [140]}, {"text": "data from the semeval and ntcir communities", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["data", "from", "the", "semeval", "and", "ntcir", "communities"], "offsets": [142, 143, 144, 145, 146, 147, 148]}, {"text": "clarify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["clarify"], "offsets": [150]}], "trigger": {"text": "utilise", "tokens": ["utilise"], "offsets": [141]}}, {"event_type": "PUR", "arguments": [{"text": "properties of nine evaluation measures in the context of oc tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["properties", "of", "nine", "evaluation", "measures", "in", "the", "context", "of", "oc", "tasks"], "offsets": [152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162]}, {"text": "six measures in the context of oq tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["six", "measures", "in", "the", "context", "of", "oq", "tasks"], "offsets": [165, 166, 167, 168, 169, 170, 171, 172]}], "trigger": {"text": "clarify", "tokens": ["clarify"], "offsets": [150]}}], "document": ["ordinal", "classification", "(", "oc", ")", "is", "an", "important", "classification", "task", "where", "the", "classes", "are", "ordinal", ".", "for", "example", ",", "an", "oc", 
"task", "for", "sentiment", "analysis", "could", "have", "the", "following", "classes", ":", "highly", "positive", ",", "positive", ",", "neutral", ",", "negative", ",", "highly", "negative", ".", "clearly", ",", "evaluation", "measures", "for", "an", "oc", "task", "should", "penalise", "misclassifications", "by", "considering", "the", "ordinal", "nature", "of", "the", "classes", ".", "ordinal", "quantification", "(", "oq", ")", "is", "a", "related", "task", "where", "the", "gold", "data", "is", "a", "distribution", "over", "ordinal", "classes", ",", "and", "the", "system", "is", "required", "to", "estimate", "this", "distribution", ".", "evaluation", "measures", "for", "an", "oq", "task", "should", "also", "take", "the", "ordinal", "nature", "of", "the", "classes", "into", "account", ".", "however", ",", "for", "both", "oc", "and", "oq", ",", "there", "are", "only", "a", "small", "number", "of", "known", "evaluation", "measures", "that", "meet", "this", "basic", "requirement", ".", "in", "the", "present", "study", ",", "we", "utilise", "data", "from", "the", "semeval", "and", "ntcir", "communities", "to", "clarify", "the", "properties", "of", "nine", "evaluation", "measures", "in", "the", "context", "of", "oc", "tasks", ",", "and", "six", "measures", "in", "the", "context", "of", "oq", "tasks", "."]}, {"venue": "ACL", "title": "Are we there yet? Encoder-decoder neural networks as cognitive models of English past tense inflection", "abstract": "The cognitive mechanisms needed to account for the English past tense have long been a subject of debate in linguistics and cognitive science. Neural network models were proposed early on, but were shown to have clear flaws. Recently, however, Kirov and Cotterell (2018) showed that modern encoder-decoder (ED) models overcome many of these flaws. They also presented evidence that ED models demonstrate humanlike performance in a nonce-word task. Here, we look more closely at the behaviour of their model in this task. 
We find that (1) the model exhibits instability across multiple simulations in terms of its correlation with human data, and (2) even when results are aggregated across simulations (treating each simulation as an individual human participant), the fit to the human data is not strong\u2014worse than an older rule-based model. These findings hold up through several alternative training regimes and evaluation measures. Although other neural architectures might do better, we conclude that there is still insufficient evidence to claim that neural nets are a good cognitive model for this task.", "doc_id": "b63f9eb95c8937d802de403a7ccbdbb9", "publication_year": 2019, "sentences": ["the cognitive mechanisms needed to account for the english past tense have long been a subject of debate in linguistics and cognitive science .", "neural network models were proposed early on , but were shown to have clear flaws .", "recently , however , kirov and cotterell ( 2018 ) showed that modern encoder - decoder ( ed ) models overcome many of these flaws .", "they also presented evidence that ed models demonstrate humanlike performance in a nonce - word task .", "here , we look more closely at the behaviour of their model in this task .", "we find that ( 1 ) the model exhibits instability across multiple simulations in terms of its correlation with human data , and ( 2 ) even when results are aggregated across simulations ( treating each simulation as an individual human participant ) , the fit to the human data is not strong \u2014 worse than an older rule - based model .", "these findings hold up through several alternative training regimes and evaluation measures .", "although other neural architectures might do better , we conclude that there is still insufficient evidence to claim that neural nets are a good cognitive model for this task ."], "events": [{"event_type": "ITT", "arguments": [{"text": "cognitive mechanisms", "nugget_type": "APP", "argument_type": "Target", "tokens": 
["cognitive", "mechanisms"], "offsets": [1, 2]}], "trigger": {"text": "subject", "tokens": ["subject"], "offsets": [15]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [85]}, {"text": "behaviour of their model", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["behaviour", "of", "encoder", "-", "decoder", "models"], "offsets": [91, 92, 53, 54, 55, 59]}], "trigger": {"text": "look", "tokens": ["look"], "offsets": [86]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [99]}, {"text": "exhibits", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["exhibits"], "offsets": [107]}, {"text": "not strong", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["not", "strong"], "offsets": [150, 151]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [100]}}, {"event_type": "FAC", "arguments": [{"text": "across multiple simulations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "multiple", "simulations"], "offsets": [109, 110, 111]}, {"text": "correlation with human data", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["correlation", "with", "human", "data"], "offsets": [116, 117, 118, 119]}, {"text": "encoder - decoder ( ed ) models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["encoder", "-", "decoder", "models"], "offsets": [53, 54, 55, 59]}, {"text": "instability", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["instability"], "offsets": [108]}], "trigger": {"text": "exhibits", "tokens": ["exhibits"], "offsets": [107]}}, {"event_type": "CMP", "arguments": [{"text": "fit to the human data", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["fit", "to", "the", "human", "data"], "offsets": [144, 145, 146, 147, 148]}, {"text": "older rule - based model", "nugget_type": "APP", 
"argument_type": "Arg2", "tokens": ["older", "rule", "-", "based", "model"], "offsets": [156, 157, 158, 159, 160]}, {"text": "when results are aggregated across simulations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "results", "are", "aggregated", "across", "simulations"], "offsets": [126, 127, 128, 129, 130, 131]}, {"text": "not strong", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["not", "strong"], "offsets": [150, 151]}], "trigger": {"text": "not strong", "tokens": ["not", "strong"], "offsets": [150, 151]}}], "document": ["the", "cognitive", "mechanisms", "needed", "to", "account", "for", "the", "english", "past", "tense", "have", "long", "been", "a", "subject", "of", "debate", "in", "linguistics", "and", "cognitive", "science", ".", "neural", "network", "models", "were", "proposed", "early", "on", ",", "but", "were", "shown", "to", "have", "clear", "flaws", ".", "recently", ",", "however", ",", "kirov", "and", "cotterell", "(", "2018", ")", "showed", "that", "modern", "encoder", "-", "decoder", "(", "ed", ")", "models", "overcome", "many", "of", "these", "flaws", ".", "they", "also", "presented", "evidence", "that", "ed", "models", "demonstrate", "humanlike", "performance", "in", "a", "nonce", "-", "word", "task", ".", "here", ",", "we", "look", "more", "closely", "at", "the", "behaviour", "of", "their", "model", "in", "this", "task", ".", "we", "find", "that", "(", "1", ")", "the", "model", "exhibits", "instability", "across", "multiple", "simulations", "in", "terms", "of", "its", "correlation", "with", "human", "data", ",", "and", "(", "2", ")", "even", "when", "results", "are", "aggregated", "across", "simulations", "(", "treating", "each", "simulation", "as", "an", "individual", "human", "participant", ")", ",", "the", "fit", "to", "the", "human", "data", "is", "not", "strong", "\u2014", "worse", "than", "an", "older", "rule", "-", "based", "model", ".", "these", "findings", "hold", "up", "through", "several", 
"alternative", "training", "regimes", "and", "evaluation", "measures", ".", "although", "other", "neural", "architectures", "might", "do", "better", ",", "we", "conclude", "that", "there", "is", "still", "insufficient", "evidence", "to", "claim", "that", "neural", "nets", "are", "a", "good", "cognitive", "model", "for", "this", "task", "."]}, {"venue": "ACL", "title": "Posterior Control of Blackbox Generation", "abstract": "Text generation often requires high-precision output that obeys task-specific rules. This fine-grained control is difficult to enforce with off-the-shelf deep learning models. In this work, we consider augmenting neural generation models with discrete control states learned through a structured latent-variable approach. Under this formulation, task-specific knowledge can be encoded through a range of rich, posterior constraints that are effectively trained into the model. This approach allows users to ground internal model decisions based on prior knowledge, without sacrificing the representational power of neural generative models. Experiments consider applications of this approach for text generation. 
We find that this method improves over standard benchmarks, while also providing fine-grained control.", "doc_id": "48d89448d640d8d07b585df37e042a9c", "publication_year": 2020, "sentences": ["text generation often requires high - precision output that obeys task - specific rules .", "this fine - grained control is difficult to enforce with off - the - shelf deep learning models .", "in this work , we consider augmenting neural generation models with discrete control states learned through a structured latent - variable approach .", "under this formulation , task - specific knowledge can be encoded through a range of rich , posterior constraints that are effectively trained into the model .", "this approach allows users to ground internal model decisions based on prior knowledge , without sacrificing the representational power of neural generative models .", "experiments consider applications of this approach for text generation .", "we find that this method improves over standard benchmarks , while also providing fine - grained control ."], "events": [{"event_type": "ITT", "arguments": [{"text": "text generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "generation"], "offsets": [0, 1]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [3]}}, {"event_type": "MDS", "arguments": [{"text": "structured latent - variable approach", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["structured", "latent", "-", "variable", "approach"], "offsets": [51, 52, 53, 54, 55]}, {"text": "discrete control states", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["discrete", "control", "states"], "offsets": [45, 46, 47]}, {"text": "neural generation models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["neural", "generation", "models"], "offsets": [41, 42, 43]}], "trigger": {"text": "augmenting", "tokens": ["augmenting"], "offsets": [40]}}, {"event_type": "MDS", "arguments": [{"text": 
"range of rich , posterior constraints", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["range", "of", "rich", ",", "posterior", "constraints"], "offsets": [70, 71, 72, 73, 74, 75]}, {"text": "task - specific knowledge", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["task", "-", "specific", "knowledge"], "offsets": [61, 62, 63, 64]}], "trigger": {"text": "encoded", "tokens": ["encoded"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "internal model decisions based on prior knowledge", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["internal", "model", "decisions", "based", "on", "prior", "knowledge"], "offsets": [90, 91, 92, 93, 94, 95, 96]}, {"text": "without sacrificing the representational power of neural generative models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "sacrificing", "the", "representational", "power", "of", "neural", "generative", "models"], "offsets": [98, 99, 100, 101, 102, 103, 104, 105, 106]}], "trigger": {"text": "ground", "tokens": ["ground"], "offsets": [89]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [118]}, {"text": "improves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improves"], "offsets": [123]}, {"text": "providing", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["providing"], "offsets": [130]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [119]}}, {"event_type": "CMP", "arguments": [{"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [123]}, {"text": "over standard benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "standard", "benchmarks"], "offsets": [124, 125, 126]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [123]}}, {"event_type": "FAC", "arguments": [{"text": "fine - grained 
control", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["fine", "-", "grained", "control"], "offsets": [131, 132, 133, 134]}, {"text": "method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["method"], "offsets": [122]}], "trigger": {"text": "providing", "tokens": ["providing"], "offsets": [130]}}], "document": ["text", "generation", "often", "requires", "high", "-", "precision", "output", "that", "obeys", "task", "-", "specific", "rules", ".", "this", "fine", "-", "grained", "control", "is", "difficult", "to", "enforce", "with", "off", "-", "the", "-", "shelf", "deep", "learning", "models", ".", "in", "this", "work", ",", "we", "consider", "augmenting", "neural", "generation", "models", "with", "discrete", "control", "states", "learned", "through", "a", "structured", "latent", "-", "variable", "approach", ".", "under", "this", "formulation", ",", "task", "-", "specific", "knowledge", "can", "be", "encoded", "through", "a", "range", "of", "rich", ",", "posterior", "constraints", "that", "are", "effectively", "trained", "into", "the", "model", ".", "this", "approach", "allows", "users", "to", "ground", "internal", "model", "decisions", "based", "on", "prior", "knowledge", ",", "without", "sacrificing", "the", "representational", "power", "of", "neural", "generative", "models", ".", "experiments", "consider", "applications", "of", "this", "approach", "for", "text", "generation", ".", "we", "find", "that", "this", "method", "improves", "over", "standard", "benchmarks", ",", "while", "also", "providing", "fine", "-", "grained", "control", "."]}, {"venue": "ACL", "title": "Measuring and Improving BERT\u2019s Mathematical Abilities by Predicting the Order of Reasoning.", "abstract": "Imagine you are in a supermarket. You have two bananas in your basket and want to buy four apples. How many fruits do you have in total? This seemingly straightforward question can be challenging for data-driven language models, even if trained at scale. 
However, we would expect such generic language models to possess some mathematical abilities in addition to typical linguistic competence. Towards this goal, we investigate if a commonly used language model, BERT, possesses such mathematical abilities and, if so, to what degree. For that, we fine-tune BERT on a popular dataset for word math problems, AQuA-RAT, and conduct several tests to understand learned representations better. Since we teach models trained on natural language to do formal mathematics, we hypothesize that such models would benefit from training on semi-formal steps that explain how math results are derived. To better accommodate such training, we also propose new pretext tasks for learning mathematical rules. We call them (Neighbor) Reasoning Order Prediction (ROP or NROP). With this new model, we achieve significantly better outcomes than data-driven baselines and even on-par with more tailored models.", "doc_id": "f2a695dcc926450ac819d9c5912362a4", "publication_year": 2021, "sentences": ["imagine you are in a supermarket .", "you have two bananas in your basket and want to buy four apples .", "how many fruits do you have in total ?", "this seemingly straightforward question can be challenging for data - driven language models , even if trained at scale .", "however , we would expect such generic language models to possess some mathematical abilities in addition to typical linguistic competence .", "towards this goal , we investigate if a commonly used language model , bert , possesses such mathematical abilities and , if so , to what degree .", "for that , we fine - tune bert on a popular dataset for word math problems , aqua - rat , and conduct several tests to understand learned representations better .", "since we teach models trained on natural language to do formal mathematics , we hypothesize that such models would benefit from training on semi - formal steps that explain how math results are derived .", "to better accommodate such 
training , we also propose new pretext tasks for learning mathematical rules .", "we call them ( neighbor ) reasoning order prediction ( rop or nrop ) .", "with this new model , we achieve significantly better outcomes than data - driven baselines and even on - par with more tailored models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "data - driven language models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["data", "-", "driven", "language", "models"], "offsets": [38, 39, 40, 41, 42]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [36]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [75]}, {"text": "bert", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bert"], "offsets": [84]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [76]}}, {"event_type": "MDS", "arguments": [{"text": "bert", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["bert"], "offsets": [106]}, {"text": "dataset for word math problems", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["dataset", "for", "word", "math", "problems"], "offsets": [110, 111, 112, 113, 114]}], "trigger": {"text": "fine - tune", "tokens": ["fine", "-", "tune"], "offsets": [103, 104, 105]}}, {"event_type": "WKS", "arguments": [{"text": "several tests", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["several", "tests"], "offsets": [122, 123]}, {"text": "understand", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["understand"], "offsets": [125]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [121]}}, {"event_type": "PUR", "arguments": [{"text": "learned representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["learned", "representations"], "offsets": [126, 127]}], "trigger": {"text": "understand", "tokens": ["understand"], "offsets": [125]}}, 
{"event_type": "MDS", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["models"], "offsets": [133]}, {"text": "trained on natural language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["trained", "on", "natural", "language"], "offsets": [134, 135, 136, 137]}, {"text": "do", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["do"], "offsets": [139]}], "trigger": {"text": "teach", "tokens": ["teach"], "offsets": [132]}}, {"event_type": "PUR", "arguments": [{"text": "formal mathematics", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["formal", "mathematics"], "offsets": [140, 141]}], "trigger": {"text": "do", "tokens": ["do"], "offsets": [139]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [171]}, {"text": "learning", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learning"], "offsets": [178]}, {"text": "reasoning order prediction", "nugget_type": "APP", "argument_type": "Content", "tokens": ["reasoning", "order", "prediction"], "offsets": [188, 189, 190]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [173]}}, {"event_type": "PUR", "arguments": [{"text": "mathematical rules", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["mathematical", "rules"], "offsets": [179, 180]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [178]}}, {"event_type": "CMP", "arguments": [{"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [204]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [205]}, {"text": "outcomes", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["outcomes"], "offsets": [206]}, {"text": "data - driven baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["data", "-", "driven", 
"baselines"], "offsets": [208, 209, 210, 211]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [203]}}, {"event_type": "CMP", "arguments": [{"text": "on - par", "nugget_type": "STR", "argument_type": "Result", "tokens": ["on", "-", "par"], "offsets": [214, 215, 216]}, {"text": "more", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["more"], "offsets": [218]}], "trigger": {"text": "on - par", "tokens": ["on", "-", "par"], "offsets": [214, 215, 216]}}], "document": ["imagine", "you", "are", "in", "a", "supermarket", ".", "you", "have", "two", "bananas", "in", "your", "basket", "and", "want", "to", "buy", "four", "apples", ".", "how", "many", "fruits", "do", "you", "have", "in", "total", "?", "this", "seemingly", "straightforward", "question", "can", "be", "challenging", "for", "data", "-", "driven", "language", "models", ",", "even", "if", "trained", "at", "scale", ".", "however", ",", "we", "would", "expect", "such", "generic", "language", "models", "to", "possess", "some", "mathematical", "abilities", "in", "addition", "to", "typical", "linguistic", "competence", ".", "towards", "this", "goal", ",", "we", "investigate", "if", "a", "commonly", "used", "language", "model", ",", "bert", ",", "possesses", "such", "mathematical", "abilities", "and", ",", "if", "so", ",", "to", "what", "degree", ".", "for", "that", ",", "we", "fine", "-", "tune", "bert", "on", "a", "popular", "dataset", "for", "word", "math", "problems", ",", "aqua", "-", "rat", ",", "and", "conduct", "several", "tests", "to", "understand", "learned", "representations", "better", ".", "since", "we", "teach", "models", "trained", "on", "natural", "language", "to", "do", "formal", "mathematics", ",", "we", "hypothesize", "that", "such", "models", "would", "benefit", "from", "training", "on", "semi", "-", "formal", "steps", "that", "explain", "how", "math", "results", "are", "derived", ".", "to", "better", "accommodate", "such", "training", ",", "we", "also", "propose", "new", 
"pretext", "tasks", "for", "learning", "mathematical", "rules", ".", "we", "call", "them", "(", "neighbor", ")", "reasoning", "order", "prediction", "(", "rop", "or", "nrop", ")", ".", "with", "this", "new", "model", ",", "we", "achieve", "significantly", "better", "outcomes", "than", "data", "-", "driven", "baselines", "and", "even", "on", "-", "par", "with", "more", "tailored", "models", "."]}, {"venue": "ACL", "title": "Sample, Translate, Recombine: Leveraging Audio Alignments for Data Augmentation in End-to-end Speech Translation", "abstract": "End-to-end speech translation relies on data that pair source-language speech inputs with corresponding translations into a target language. Such data are notoriously scarce, making synthetic data augmentation by back-translation or knowledge distillation a necessary ingredient of end-to-end training. In this paper, we present a novel approach to data augmentation that leverages audio alignments, linguistic properties, and translation. First, we augment a transcription by sampling from a suffix memory that stores text and audio data. Second, we translate the augmented transcript. Finally, we recombine concatenated audio segments and the generated translation. 
Our method delivers consistent improvements of up to 0.9 and 1.1 BLEU points on top of augmentation with knowledge distillation on five language pairs on CoVoST 2 and on two language pairs on Europarl-ST, respectively.", "doc_id": "12a484547f3d406a8063ead310670cca", "publication_year": 2022, "sentences": ["end - to - end speech translation relies on data that pair source - language speech inputs with corresponding translations into a target language .", "such data are notoriously scarce , making synthetic data augmentation by back - translation or knowledge distillation a necessary ingredient of end - to - end training .", "in this paper , we present a novel approach to data augmentation that leverages audio alignments , linguistic properties , and translation .", "first , we augment a transcription by sampling from a suffix memory that stores text and audio data .", "second , we translate the augmented transcript .", "finally , we recombine concatenated audio segments and the generated translation .", "our method delivers consistent improvements of up to 0 . 9 and 1 . 
1 bleu points on top of augmentation with knowledge distillation on five language pairs on covost 2 and on two language pairs on europarl - st , respectively ."], "events": [{"event_type": "ITT", "arguments": [{"text": "end - to - end speech translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["end", "-", "to", "-", "end", "speech", "translation"], "offsets": [0, 1, 2, 3, 4, 5, 6]}], "trigger": {"text": "relies", "tokens": ["relies"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "data", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["data"], "offsets": [26]}], "trigger": {"text": "notoriously scarce", "tokens": ["notoriously", "scarce"], "offsets": [28, 29]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [57]}, {"text": "approach to data augmentation", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach", "to", "data", "augmentation"], "offsets": [61, 62, 63, 64]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [58]}}, {"event_type": "MDS", "arguments": [{"text": "suffix memory", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["suffix", "memory"], "offsets": [86, 87]}, {"text": "augment", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["augment"], "offsets": [79]}], "trigger": {"text": "sampling", "tokens": ["sampling"], "offsets": [83]}}, {"event_type": "PUR", "arguments": [{"text": "transcription", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["transcription"], "offsets": [81]}], "trigger": {"text": "augment", "tokens": ["augment"], "offsets": [79]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [97]}, {"text": "augmented transcript", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["augmented", "transcript"], "offsets": [100, 101]}], "trigger": 
{"text": "translate", "tokens": ["translate"], "offsets": [98]}}, {"event_type": "FAC", "arguments": [{"text": "consistent improvements", "nugget_type": "STR", "argument_type": "Object", "tokens": ["consistent", "improvements"], "offsets": [118, 119]}, {"text": "approach to data augmentation", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach", "to", "data", "augmentation"], "offsets": [61, 62, 63, 64]}, {"text": "0 . 9", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["0", ".", "9"], "offsets": [123, 124, 125]}, {"text": "bleu points", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["bleu", "points"], "offsets": [130, 131]}, {"text": "on five language pairs on covost 2", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "five", "language", "pairs", "on", "covost", "2"], "offsets": [139, 140, 141, 142, 143, 144, 145]}], "trigger": {"text": "delivers", "tokens": ["delivers"], "offsets": [117]}}, {"event_type": "FAC", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach"], "offsets": [61]}, {"text": "consistent improvements", "nugget_type": "STR", "argument_type": "Object", "tokens": ["consistent", "improvements"], "offsets": [118, 119]}, {"text": "on two language pairs on europarl - st", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "two", "language", "pairs", "on", "europarl", "-", "st"], "offsets": [147, 148, 149, 150, 151, 152, 153, 154]}, {"text": "1 . 
1", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["1", ".", "1"], "offsets": [127, 128, 129]}, {"text": "bleu points", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["bleu", "points"], "offsets": [130, 131]}], "trigger": {"text": "delivers", "tokens": ["delivers"], "offsets": [117]}}, {"event_type": "MDS", "arguments": [{"text": "concatenated audio segments", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["concatenated", "audio", "segments"], "offsets": [107, 108, 109]}, {"text": "generated translation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["generated", "translation"], "offsets": [112, 113]}], "trigger": {"text": "recombine", "tokens": ["recombine"], "offsets": [106]}}], "document": ["end", "-", "to", "-", "end", "speech", "translation", "relies", "on", "data", "that", "pair", "source", "-", "language", "speech", "inputs", "with", "corresponding", "translations", "into", "a", "target", "language", ".", "such", "data", "are", "notoriously", "scarce", ",", "making", "synthetic", "data", "augmentation", "by", "back", "-", "translation", "or", "knowledge", "distillation", "a", "necessary", "ingredient", "of", "end", "-", "to", "-", "end", "training", ".", "in", "this", "paper", ",", "we", "present", "a", "novel", "approach", "to", "data", "augmentation", "that", "leverages", "audio", "alignments", ",", "linguistic", "properties", ",", "and", "translation", ".", "first", ",", "we", "augment", "a", "transcription", "by", "sampling", "from", "a", "suffix", "memory", "that", "stores", "text", "and", "audio", "data", ".", "second", ",", "we", "translate", "the", "augmented", "transcript", ".", "finally", ",", "we", "recombine", "concatenated", "audio", "segments", "and", "the", "generated", "translation", ".", "our", "method", "delivers", "consistent", "improvements", "of", "up", "to", "0", ".", "9", "and", "1", ".", "1", "bleu", "points", "on", "top", "of", "augmentation", "with", "knowledge", 
"distillation", "on", "five", "language", "pairs", "on", "covost", "2", "and", "on", "two", "language", "pairs", "on", "europarl", "-", "st", ",", "respectively", "."]}, {"venue": "ACL", "title": "Multi-step Reasoning via Recurrent Dual Attention for Visual Dialog", "abstract": "This paper presents a new model for visual dialog, Recurrent Dual Attention Network (ReDAN), using multi-step reasoning to answer a series of questions about an image. In each question-answering turn of a dialog, ReDAN infers the answer progressively through multiple reasoning steps. In each step of the reasoning process, the semantic representation of the question is updated based on the image and the previous dialog history, and the recurrently-refined representation is used for further reasoning in the subsequent step. On the VisDial v1.0 dataset, the proposed ReDAN model achieves a new state-of-the-art of 64.47% NDCG score. Visualization on the reasoning process further demonstrates that ReDAN can locate context-relevant visual and textual clues via iterative refinement, which can lead to the correct answer step-by-step.", "doc_id": "66100c0bcb6d028d24368ba2cc27bfd4", "publication_year": 2019, "sentences": ["this paper presents a new model for visual dialog , recurrent dual attention network ( redan ) , using multi - step reasoning to answer a series of questions about an image .", "in each question - answering turn of a dialog , redan infers the answer progressively through multiple reasoning steps .", "in each step of the reasoning process , the semantic representation of the question is updated based on the image and the previous dialog history , and the recurrently - refined representation is used for further reasoning in the subsequent step .", "on the visdial v1 . 0 dataset , the proposed redan model achieves a new state - of - the - art of 64 . 
47 % ndcg score .", "visualization on the reasoning process further demonstrates that redan can locate context - relevant visual and textual clues via iterative refinement , which can lead to the correct answer step - by - step ."], "events": [{"event_type": "PRP", "arguments": [{"text": "visual dialog", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["visual", "dialog"], "offsets": [7, 8]}, {"text": "recurrent dual attention network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["recurrent", "dual", "attention", "network"], "offsets": [10, 11, 12, 13]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [2]}}, {"event_type": "FAC", "arguments": [{"text": "64 . 47 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["64", ".", "47", "%"], "offsets": [118, 119, 120, 121]}, {"text": "visdial v1 . 0 dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["visdial", "v1", ".", "0", "dataset"], "offsets": [97, 98, 99, 100, 101]}, {"text": "redan model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["recurrent", "dual", "attention", "network", "model"], "offsets": [10, 11, 12, 13, 106]}, {"text": "state - of - the - art", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [110, 111, 112, 113, 114, 115, 116]}, {"text": "ndcg score", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["ndcg", "score"], "offsets": [122, 123]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [107]}}, {"event_type": "FIN", "arguments": [{"text": "locate", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["locate"], "offsets": [135]}], "trigger": {"text": "demonstrates", "tokens": ["demonstrates"], "offsets": [131]}}, {"event_type": "FAC", "arguments": [{"text": "recurrent dual attention network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["recurrent", "dual", "attention", "network"], 
"offsets": [10, 11, 12, 13]}, {"text": "lead", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["lead"], "offsets": [149]}, {"text": "via iterative refinement", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "iterative", "refinement"], "offsets": [143, 144, 145]}, {"text": "context - relevant visual clues", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["context", "-", "relevant", "visual", "clues"], "offsets": [136, 137, 138, 139, 142]}, {"text": "textual clues", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["textual", "clues"], "offsets": [141, 142]}], "trigger": {"text": "locate", "tokens": ["locate"], "offsets": [135]}}, {"event_type": "WKS", "arguments": [{"text": "multi - step reasoning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "step", "reasoning"], "offsets": [19, 20, 21, 22]}, {"text": "answer", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["answer"], "offsets": [24]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [18]}}, {"event_type": "PUR", "arguments": [{"text": "series of questions about an image", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["series", "of", "questions", "about", "an", "image"], "offsets": [26, 27, 28, 29, 30, 31]}], "trigger": {"text": "answer", "tokens": ["answer"], "offsets": [24]}}, {"event_type": "MDS", "arguments": [{"text": "in each question - answering turn of a dialog", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "each", "question", "-", "answering", "turn", "of", "a", "dialog"], "offsets": [33, 34, 35, 36, 37, 38, 39, 40, 41]}, {"text": "answer", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["answer"], "offsets": [46]}, {"text": "multiple reasoning steps", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["multiple", "reasoning", "steps"], "offsets": [49, 50, 51]}], "trigger": {"text": "infers", "tokens": ["infers"], 
"offsets": [44]}}, {"event_type": "MDS", "arguments": [{"text": "in each step of the reasoning process", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "each", "step", "of", "the", "reasoning", "process"], "offsets": [53, 54, 55, 56, 57, 58, 59]}, {"text": "semantic representation of the question", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["semantic", "representation", "of", "the", "question"], "offsets": [62, 63, 64, 65, 66]}, {"text": "image", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["image"], "offsets": [72]}, {"text": "previous dialog history", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["previous", "dialog", "history"], "offsets": [75, 76, 77]}], "trigger": {"text": "updated", "tokens": ["updated"], "offsets": [68]}}, {"event_type": "MDS", "arguments": [{"text": "in the subsequent step", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "subsequent", "step"], "offsets": [90, 91, 92, 93]}, {"text": "recurrently - refined representation", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["recurrently", "-", "refined", "representation"], "offsets": [81, 82, 83, 84]}, {"text": "further reasoning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["further", "reasoning"], "offsets": [88, 89]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [86]}}, {"event_type": "PUR", "arguments": [{"text": "correct answer", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["correct", "answer"], "offsets": [152, 153]}, {"text": "step - by - step", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["step", "-", "by", "-", "step"], "offsets": [154, 155, 156, 157, 158]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [149]}}], "document": ["this", "paper", "presents", "a", "new", "model", "for", "visual", "dialog", ",", "recurrent", "dual", "attention", "network", "(", "redan", ")", ",", 
"using", "multi", "-", "step", "reasoning", "to", "answer", "a", "series", "of", "questions", "about", "an", "image", ".", "in", "each", "question", "-", "answering", "turn", "of", "a", "dialog", ",", "redan", "infers", "the", "answer", "progressively", "through", "multiple", "reasoning", "steps", ".", "in", "each", "step", "of", "the", "reasoning", "process", ",", "the", "semantic", "representation", "of", "the", "question", "is", "updated", "based", "on", "the", "image", "and", "the", "previous", "dialog", "history", ",", "and", "the", "recurrently", "-", "refined", "representation", "is", "used", "for", "further", "reasoning", "in", "the", "subsequent", "step", ".", "on", "the", "visdial", "v1", ".", "0", "dataset", ",", "the", "proposed", "redan", "model", "achieves", "a", "new", "state", "-", "of", "-", "the", "-", "art", "of", "64", ".", "47", "%", "ndcg", "score", ".", "visualization", "on", "the", "reasoning", "process", "further", "demonstrates", "that", "redan", "can", "locate", "context", "-", "relevant", "visual", "and", "textual", "clues", "via", "iterative", "refinement", ",", "which", "can", "lead", "to", "the", "correct", "answer", "step", "-", "by", "-", "step", "."]}, {"venue": "ACL", "title": "De-Bias for Generative Extraction in Unified NER Task", "abstract": "Named entity recognition (NER) is a fundamental task to recognize specific types of entities from a given sentence. Depending on how the entities appear in the sentence, it can be divided into three subtasks, namely, Flat NER, Nested NER, and Discontinuous NER. Among the existing approaches, only the generative model can be uniformly adapted to these three subtasks. However, when the generative model is applied to NER, its optimization objective is not consistent with the task, which makes the model vulnerable to the incorrect biases. 
In this paper, we analyze the incorrect biases in the generation process from a causality perspective and attribute them to two confounders: pre-context confounder and entity-order confounder. Furthermore, we design Intra- and Inter-entity Deconfounding Data Augmentation methods to eliminate the above confounders according to the theory of backdoor adjustment. Experiments show that our method can improve the performance of the generative NER model in various datasets.", "doc_id": "e3a9d5dba88f7579d907ed32e3fafc16", "publication_year": 2022, "sentences": ["named entity recognition ( ner ) is a fundamental task to recognize specific types of entities from a given sentence .", "depending on how the entities appear in the sentence , it can be divided into three subtasks , namely , flat ner , nested ner , and discontinuous ner .", "among the existing approaches , only the generative model can be uniformly adapted to these three subtasks .", "however , when the generative model is applied to ner , its optimization objective is not consistent with the task , which makes the model vulnerable to the incorrect biases .", "in this paper , we analyze the incorrect biases in the generation process from a causality perspective and attribute them to two confounders : pre - context confounder and entity - order confounder .", "furthermore , we design intra - and inter - entity deconfounding data augmentation methods to eliminate the above confounders according to the theory of backdoor adjustment .", "experiments show that our method can improve the performance of the generative ner model in various datasets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "named entity recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["named", "entity", "recognition"], "offsets": [0, 1, 2]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [9]}}, {"event_type": "RWF", "arguments": [{"text": "generative model", "nugget_type": "APP", "argument_type": 
"Concern", "tokens": ["generative", "model"], "offsets": [73, 74]}, {"text": "incorrect biases", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["incorrect", "biases"], "offsets": [97, 98]}], "trigger": {"text": "makes", "tokens": ["makes"], "offsets": [91]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [104]}, {"text": "from a causality perspective", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "a", "causality", "perspective"], "offsets": [113, 114, 115, 116]}, {"text": "incorrect biases in the generation process", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["incorrect", "biases", "in", "the", "generation", "process"], "offsets": [107, 108, 109, 110, 111, 112]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [105]}}, {"event_type": "WKS", "arguments": [{"text": "incorrect biases in the generation process", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["incorrect", "biases", "in", "the", "generation", "process"], "offsets": [107, 108, 109, 110, 111, 112]}, {"text": "pre - context confounder", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["pre", "-", "context", "confounder"], "offsets": [124, 125, 126, 127]}, {"text": "entity - order confounder", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["entity", "-", "order", "confounder"], "offsets": [129, 130, 131, 132]}], "trigger": {"text": "attribute", "tokens": ["attribute"], "offsets": [118]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [136]}, {"text": "intra - and inter - entity deconfounding data augmentation methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["intra", "-", "and", "inter", "-", "entity", "deconfounding", "data", "augmentation", "methods"], "offsets": [138, 139, 140, 141, 142, 143, 144, 145, 146, 
147]}, {"text": "eliminate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["eliminate"], "offsets": [149]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [137]}}, {"event_type": "PUR", "arguments": [], "trigger": {"text": "eliminate", "tokens": ["eliminate"], "offsets": [149]}}, {"event_type": "FAC", "arguments": [{"text": "intra - and inter - entity deconfounding data augmentation methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["intra", "-", "and", "inter", "-", "entity", "deconfounding", "data", "augmentation", "methods"], "offsets": [138, 139, 140, 141, 142, 143, 144, 145, 146, 147]}, {"text": "performance of the generative ner model", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "of", "the", "generative", "ner", "model"], "offsets": [169, 170, 171, 172, 173, 174]}, {"text": "various datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["various", "datasets"], "offsets": [176, 177]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [167]}}], "document": ["named", "entity", "recognition", "(", "ner", ")", "is", "a", "fundamental", "task", "to", "recognize", "specific", "types", "of", "entities", "from", "a", "given", "sentence", ".", "depending", "on", "how", "the", "entities", "appear", "in", "the", "sentence", ",", "it", "can", "be", "divided", "into", "three", "subtasks", ",", "namely", ",", "flat", "ner", ",", "nested", "ner", ",", "and", "discontinuous", "ner", ".", "among", "the", "existing", "approaches", ",", "only", "the", "generative", "model", "can", "be", "uniformly", "adapted", "to", "these", "three", "subtasks", ".", "however", ",", "when", "the", "generative", "model", "is", "applied", "to", "ner", ",", "its", "optimization", "objective", "is", "not", "consistent", "with", "the", "task", ",", "which", "makes", "the", "model", "vulnerable", "to", "the", "incorrect", "biases", ".", "in", "this", "paper", ",", "we", "analyze", 
"the", "incorrect", "biases", "in", "the", "generation", "process", "from", "a", "causality", "perspective", "and", "attribute", "them", "to", "two", "confounders", ":", "pre", "-", "context", "confounder", "and", "entity", "-", "order", "confounder", ".", "furthermore", ",", "we", "design", "intra", "-", "and", "inter", "-", "entity", "deconfounding", "data", "augmentation", "methods", "to", "eliminate", "the", "above", "confounders", "according", "to", "the", "theory", "of", "backdoor", "adjustment", ".", "experiments", "show", "that", "our", "method", "can", "improve", "the", "performance", "of", "the", "generative", "ner", "model", "in", "various", "datasets", "."]}, {"venue": "ACL", "title": "CogAlign: Learning to Align Textual Neural Representations to Cognitive Language Processing Signals", "abstract": "Most previous studies integrate cognitive language processing signals (e.g., eye-tracking or EEG data) into neural models of natural language processing (NLP) just by directly concatenating word embeddings with cognitive features, ignoring the gap between the two modalities (i.e., textual vs. cognitive) and noise in cognitive features. In this paper, we propose a CogAlign approach to these issues, which learns to align textual neural representations to cognitive features. In CogAlign, we use a shared encoder equipped with a modality discriminator to alternatively encode textual and cognitive inputs to capture their differences and commonalities. Additionally, a text-aware attention mechanism is proposed to detect task-related information and to avoid using noise in cognitive features. Experimental results on three NLP tasks, namely named entity recognition, sentiment analysis and relation extraction, show that CogAlign achieves significant improvements with multiple cognitive features over state-of-the-art models on public datasets. 
Moreover, our model is able to transfer cognitive information to other datasets that do not have any cognitive processing signals.", "doc_id": "2cda10997896a127df318aba43d0c080", "publication_year": 2021, "sentences": ["most previous studies integrate cognitive language processing signals ( e . g . , eye - tracking or eeg data ) into neural models of natural language processing ( nlp ) just by directly concatenating word embeddings with cognitive features , ignoring the gap between the two modalities ( i . e . , textual vs . cognitive ) and noise in cognitive features .", "in this paper , we propose a cogalign approach to these issues , which learns to align textual neural representations to cognitive features .", "in cogalign , we use a shared encoder equipped with a modality discriminator to alternatively encode textual and cognitive inputs to capture their differences and commonalities .", "additionally , a text - aware attention mechanism is proposed to detect task - related information and to avoid using noise in cognitive features .", "experimental results on three nlp tasks , namely named entity recognition , sentiment analysis and relation extraction , show that cogalign achieves significant improvements with multiple cognitive features over state - of - the - art models on public datasets .", "moreover , our model is able to transfer cognitive information to other datasets that do not have any cognitive processing signals ."], "events": [{"event_type": "RWS", "arguments": [{"text": "cognitive language processing signals", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["cognitive", "language", "processing", "signals"], "offsets": [4, 5, 6, 7]}, {"text": "neural models of natural language processing", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["neural", "models", "of", "natural", "language", "processing"], "offsets": [22, 23, 24, 25, 26, 27]}], "trigger": {"text": "integrate", "tokens": ["integrate"], "offsets": 
[3]}}, {"event_type": "RWS", "arguments": [{"text": "cognitive features", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["cognitive", "features"], "offsets": [38, 39]}, {"text": "word embeddings", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["word", "embeddings"], "offsets": [35, 36]}], "trigger": {"text": "directly concatenating", "tokens": ["directly", "concatenating"], "offsets": [33, 34]}}, {"event_type": "RWS", "arguments": [{"text": "gap between the two modalities ( i . e . , textual vs . cognitive ) and noise in cognitive features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["gap", "between", "the", "two", "modalities", "and", "noise", "in", "cognitive", "features"], "offsets": [43, 44, 45, 46, 47, 59, 60, 61, 62, 63]}], "trigger": {"text": "ignoring", "tokens": ["ignoring"], "offsets": [41]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [69]}, {"text": "cogalign approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["cogalign", "approach"], "offsets": [72, 73]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [70]}}, {"event_type": "MDS", "arguments": [{"text": "textual neural representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["textual", "neural", "representations"], "offsets": [82, 83, 84]}, {"text": "cognitive features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["cognitive", "features"], "offsets": [86, 87]}], "trigger": {"text": "align", "tokens": ["align"], "offsets": [81]}}, {"event_type": "PRP", "arguments": [{"text": "text - aware attention mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["text", "-", "aware", "attention", "mechanism"], "offsets": [119, 120, 121, 122, 123]}, {"text": "detect", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["detect"], "offsets": [127]}, 
{"text": "avoid", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["avoid"], "offsets": [134]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [125]}}, {"event_type": "PUR", "arguments": [{"text": "task - related information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["task", "-", "related", "information"], "offsets": [128, 129, 130, 131]}], "trigger": {"text": "detect", "tokens": ["detect"], "offsets": [127]}}, {"event_type": "PUR", "arguments": [{"text": "using noise in cognitive features", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["using", "noise", "in", "cognitive", "features"], "offsets": [135, 136, 137, 138, 139]}], "trigger": {"text": "avoid", "tokens": ["avoid"], "offsets": [134]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [162]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [159]}}, {"event_type": "CMP", "arguments": [{"text": "cogalign approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["cogalign", "approach"], "offsets": [72, 73]}, {"text": "significant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant"], "offsets": [163]}, {"text": "state - of - the - art models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [170, 171, 172, 173, 174, 175, 176, 177]}, {"text": "public datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["public", "datasets"], "offsets": [179, 180]}, {"text": "improvements with multiple cognitive features", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvements", "with", "multiple", "cognitive", "features"], "offsets": [164, 165, 166, 167, 168]}, {"text": "on three nlp tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "three", "nlp", "tasks"], "offsets": [143, 144, 145, 146]}], 
"trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [162]}}, {"event_type": "MDS", "arguments": [{"text": "cognitive information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["cognitive", "information"], "offsets": [190, 191]}, {"text": "other datasets that do not have any cognitive processing signals", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["other", "datasets", "that", "do", "not", "have", "any", "cognitive", "processing", "signals"], "offsets": [193, 194, 195, 196, 197, 198, 199, 200, 201, 202]}], "trigger": {"text": "transfer", "tokens": ["transfer"], "offsets": [189]}}], "document": ["most", "previous", "studies", "integrate", "cognitive", "language", "processing", "signals", "(", "e", ".", "g", ".", ",", "eye", "-", "tracking", "or", "eeg", "data", ")", "into", "neural", "models", "of", "natural", "language", "processing", "(", "nlp", ")", "just", "by", "directly", "concatenating", "word", "embeddings", "with", "cognitive", "features", ",", "ignoring", "the", "gap", "between", "the", "two", "modalities", "(", "i", ".", "e", ".", ",", "textual", "vs", ".", "cognitive", ")", "and", "noise", "in", "cognitive", "features", ".", "in", "this", "paper", ",", "we", "propose", "a", "cogalign", "approach", "to", "these", "issues", ",", "which", "learns", "to", "align", "textual", "neural", "representations", "to", "cognitive", "features", ".", "in", "cogalign", ",", "we", "use", "a", "shared", "encoder", "equipped", "with", "a", "modality", "discriminator", "to", "alternatively", "encode", "textual", "and", "cognitive", "inputs", "to", "capture", "their", "differences", "and", "commonalities", ".", "additionally", ",", "a", "text", "-", "aware", "attention", "mechanism", "is", "proposed", "to", "detect", "task", "-", "related", "information", "and", "to", "avoid", "using", "noise", "in", "cognitive", "features", ".", "experimental", "results", "on", "three", "nlp", "tasks", ",", "namely", "named", 
"entity", "recognition", ",", "sentiment", "analysis", "and", "relation", "extraction", ",", "show", "that", "cogalign", "achieves", "significant", "improvements", "with", "multiple", "cognitive", "features", "over", "state", "-", "of", "-", "the", "-", "art", "models", "on", "public", "datasets", ".", "moreover", ",", "our", "model", "is", "able", "to", "transfer", "cognitive", "information", "to", "other", "datasets", "that", "do", "not", "have", "any", "cognitive", "processing", "signals", "."]}, {"venue": "ACL", "title": "HiTab: A Hierarchical Table Dataset for Question Answering and Natural Language Generation", "abstract": "Tables are often created with hierarchies, but existing works on table reasoning mainly focus on flat tables and neglect hierarchical tables. Hierarchical tables challenge numerical reasoning by complex hierarchical indexing, as well as implicit relationships of calculation and semantics. We present a new dataset, HiTab, to study question answering (QA) and natural language generation (NLG) over hierarchical tables. HiTab is a cross-domain dataset constructed from a wealth of statistical reports and Wikipedia pages, and has unique characteristics: (1) nearly all tables are hierarchical, and (2) QA pairs are not proposed by annotators from scratch, but are revised from real and meaningful sentences authored by analysts. (3) to reveal complex numerical reasoning in statistical reports, we provide fine-grained annotations of quantity and entity alignment. Experiments suggest that this HiTab presents a strong challenge for existing baselines and a valuable benchmark for future research. Targeting hierarchical structure, we devise a hierarchy-aware logical form for symbolic reasoning over tables, which shows high effectiveness. 
Targeting table reasoning, we leverage entity and quantity alignment to explore partially supervised training in QA and conditional generation in NLG, and largely reduce spurious predictions in QA and produce better descriptions in NLG.", "doc_id": "c43cbfe9be56c8c6e627076dbb7798a2", "publication_year": 2022, "sentences": ["tables are often created with hierarchies , but existing works on table reasoning mainly focus on flat tables and neglect hierarchical tables .", "hierarchical tables challenge numerical reasoning by complex hierarchical indexing , as well as implicit relationships of calculation and semantics .", "we present a new dataset , hitab , to study question answering ( qa ) and natural language generation ( nlg ) over hierarchical tables .", "hitab is a cross - domain dataset constructed from a wealth of statistical reports and wikipedia pages , and has unique characteristics : ( 1 ) nearly all tables are hierarchical , and ( 2 ) qa pairs are not proposed by annotators from scratch , but are revised from real and meaningful sentences authored by analysts .", "( 3 ) to reveal complex numerical reasoning in statistical reports , we provide fine - grained annotations of quantity and entity alignment .", "experiments suggest that this hitab presents a strong challenge for existing baselines and a valuable benchmark for future research .", "targeting hierarchical structure , we devise a hierarchy - aware logical form for symbolic reasoning over tables , which shows high effectiveness .", "targeting table reasoning , we leverage entity and quantity alignment to explore partially supervised training in qa and conditional generation in nlg , and largely reduce spurious predictions in qa and produce better descriptions in nlg ."], "events": [{"event_type": "ITT", "arguments": [{"text": "table reasoning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["table", "reasoning"], "offsets": [11, 12]}], "trigger": {"text": "focus", "tokens": ["focus"], 
"offsets": [14]}}, {"event_type": "RWF", "arguments": [{"text": "neglect", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["neglect"], "offsets": [19]}, {"text": "existing works on table reasoning", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "works", "on", "table", "reasoning"], "offsets": [8, 9, 10, 11, 12]}], "trigger": {"text": "neglect", "tokens": ["neglect"], "offsets": [19]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [43]}, {"text": "hitab", "nugget_type": "DST", "argument_type": "Content", "tokens": ["hitab"], "offsets": [49]}, {"text": "study", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["study"], "offsets": [52]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [44]}}, {"event_type": "PUR", "arguments": [{"text": "question answering", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["question", "answering"], "offsets": [53, 54]}, {"text": "natural language generation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["nlg"], "offsets": [229]}, {"text": "over hierarchical tables", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "hierarchical", "tables"], "offsets": [65, 66, 67]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [52]}}, {"event_type": "FIN", "arguments": [{"text": "presents", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["presents"], "offsets": [156]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [152]}}, {"event_type": "FAC", "arguments": [{"text": "hitab", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["hitab"], "offsets": [155]}, {"text": "valuable benchmark", "nugget_type": "STR", "argument_type": "Object", "tokens": ["valuable", "benchmark"], "offsets": [165, 166]}, {"text": "future research", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["future", 
"research"], "offsets": [168, 169]}, {"text": "strong challenge", "nugget_type": "STR", "argument_type": "Object", "tokens": ["strong", "challenge"], "offsets": [158, 159]}, {"text": "existing baselines", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["existing", "baselines"], "offsets": [161, 162]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [156]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [175]}, {"text": "hierarchy - aware logical form", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hierarchy", "-", "aware", "logical", "form"], "offsets": [178, 179, 180, 181, 182]}, {"text": "symbolic reasoning over tables", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["symbolic", "reasoning", "over", "tables"], "offsets": [184, 185, 186, 187]}], "trigger": {"text": "devise", "tokens": ["devise"], "offsets": [176]}}, {"event_type": "FAC", "arguments": [{"text": "hierarchy - aware logical form", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["hierarchy", "-", "aware", "logical", "form"], "offsets": [178, 179, 180, 181, 182]}, {"text": "high effectiveness", "nugget_type": "STR", "argument_type": "Object", "tokens": ["high", "effectiveness"], "offsets": [191, 192]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [190]}}, {"event_type": "MDS", "arguments": [{"text": "partially supervised training in qa", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["partially", "supervised", "training", "in", "question", "answering"], "offsets": [206, 207, 208, 209, 53, 54]}, {"text": "entity and quantity alignment", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["entity", "and", "quantity", "alignment"], "offsets": [200, 201, 202, 203]}, {"text": "conditional generation in nlg", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["conditional", 
"generation", "in", "nlg"], "offsets": [212, 213, 214, 229]}, {"text": "largely reduce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["largely", "reduce"], "offsets": [218, 219]}, {"text": "produce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["produce"], "offsets": [225]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [205]}}, {"event_type": "PUR", "arguments": [{"text": "spurious predictions in qa", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["spurious", "predictions", "in", "question", "answering"], "offsets": [220, 221, 222, 53, 54]}], "trigger": {"text": "largely reduce", "tokens": ["largely", "reduce"], "offsets": [218, 219]}}, {"event_type": "FAC", "arguments": [{"text": "hierarchy - aware logical form for symbolic reasoning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["hierarchy", "-", "aware", "logical", "form", "for", "symbolic", "reasoning"], "offsets": [178, 179, 180, 181, 182, 183, 184, 185]}, {"text": "spurious predictions", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["spurious", "predictions"], "offsets": [220, 221]}], "trigger": {"text": "largely reduce", "tokens": ["largely", "reduce"], "offsets": [218, 219]}}, {"event_type": "FAC", "arguments": [{"text": "hierarchy - aware logical form for symbolic reasoning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["hierarchy", "-", "aware", "logical", "form", "for", "symbolic", "reasoning"], "offsets": [178, 179, 180, 181, 182, 183, 184, 185]}, {"text": "better descriptions in nlg", "nugget_type": "STR", "argument_type": "Object", "tokens": ["better", "descriptions", "in", "nlg"], "offsets": [226, 227, 228, 229]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [225]}}, {"event_type": "RWF", "arguments": [{"text": "hierarchical tables", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["hierarchical", "tables"], "offsets": [23, 24]}, {"text": "challenge", 
"nugget_type": "WEA", "argument_type": "Fault", "tokens": ["challenge"], "offsets": [25]}], "trigger": {"text": "challenge", "tokens": ["challenge"], "offsets": [25]}}, {"event_type": "PUR", "arguments": [{"text": "better descriptions in nlg", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["better", "descriptions", "in", "nlg"], "offsets": [226, 227, 228, 229]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [225]}}, {"event_type": "PUR", "arguments": [{"text": "complex numerical reasoning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["complex", "numerical", "reasoning"], "offsets": [132, 133, 134]}, {"text": "in statistical reports", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "statistical", "reports"], "offsets": [135, 136, 137]}], "trigger": {"text": "reveal", "tokens": ["reveal"], "offsets": [131]}}], "document": ["tables", "are", "often", "created", "with", "hierarchies", ",", "but", "existing", "works", "on", "table", "reasoning", "mainly", "focus", "on", "flat", "tables", "and", "neglect", "hierarchical", "tables", ".", "hierarchical", "tables", "challenge", "numerical", "reasoning", "by", "complex", "hierarchical", "indexing", ",", "as", "well", "as", "implicit", "relationships", "of", "calculation", "and", "semantics", ".", "we", "present", "a", "new", "dataset", ",", "hitab", ",", "to", "study", "question", "answering", "(", "qa", ")", "and", "natural", "language", "generation", "(", "nlg", ")", "over", "hierarchical", "tables", ".", "hitab", "is", "a", "cross", "-", "domain", "dataset", "constructed", "from", "a", "wealth", "of", "statistical", "reports", "and", "wikipedia", "pages", ",", "and", "has", "unique", "characteristics", ":", "(", "1", ")", "nearly", "all", "tables", "are", "hierarchical", ",", "and", "(", "2", ")", "qa", "pairs", "are", "not", "proposed", "by", "annotators", "from", "scratch", ",", "but", "are", "revised", "from", "real", "and", "meaningful", "sentences", 
"authored", "by", "analysts", ".", "(", "3", ")", "to", "reveal", "complex", "numerical", "reasoning", "in", "statistical", "reports", ",", "we", "provide", "fine", "-", "grained", "annotations", "of", "quantity", "and", "entity", "alignment", ".", "experiments", "suggest", "that", "this", "hitab", "presents", "a", "strong", "challenge", "for", "existing", "baselines", "and", "a", "valuable", "benchmark", "for", "future", "research", ".", "targeting", "hierarchical", "structure", ",", "we", "devise", "a", "hierarchy", "-", "aware", "logical", "form", "for", "symbolic", "reasoning", "over", "tables", ",", "which", "shows", "high", "effectiveness", ".", "targeting", "table", "reasoning", ",", "we", "leverage", "entity", "and", "quantity", "alignment", "to", "explore", "partially", "supervised", "training", "in", "qa", "and", "conditional", "generation", "in", "nlg", ",", "and", "largely", "reduce", "spurious", "predictions", "in", "qa", "and", "produce", "better", "descriptions", "in", "nlg", "."]}, {"venue": "ACL", "title": "MLBiNet: A Cross-Sentence Collective Event Detection Network", "abstract": "We consider the problem of collectively detecting multiple events, particularly in cross-sentence settings. The key to dealing with the problem is to encode semantic information and model event inter-dependency at a document-level. In this paper, we reformulate it as a Seq2Seq task and propose a Multi-Layer Bidirectional Network (MLBiNet) to capture the document-level association of events and semantic information simultaneously. Specifically, a bidirectional decoder is firstly devised to model event inter-dependency within a sentence when decoding the event tag vector sequence. Secondly, an information aggregation module is employed to aggregate sentence-level semantic and event tag information. 
Finally, we stack multiple bidirectional decoders and feed cross-sentence information, forming a multi-layer bidirectional tagging architecture to iteratively propagate information across sentences. We show that our approach provides significant improvement in performance compared to the current state-of-the-art results.", "doc_id": "fb6fa1d713aa51feaa0e0963de5ccd7c", "publication_year": 2021, "sentences": ["we consider the problem of collectively detecting multiple events , particularly in cross - sentence settings .", "the key to dealing with the problem is to encode semantic information and model event inter - dependency at a document - level .", "in this paper , we reformulate it as a seq2seq task and propose a multi - layer bidirectional network ( mlbinet ) to capture the document - level association of events and semantic information simultaneously .", "specifically , a bidirectional decoder is firstly devised to model event inter - dependency within a sentence when decoding the event tag vector sequence .", "secondly , an information aggregation module is employed to aggregate sentence - level semantic and event tag information .", "finally , we stack multiple bidirectional decoders and feed cross - sentence information , forming a multi - layer bidirectional tagging architecture to iteratively propagate information across sentences .", "we show that our approach provides significant improvement in performance compared to the current state - of - the - art results ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [45]}, {"text": "seq2seq task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["seq2seq", "task"], "offsets": [50, 51]}], "trigger": {"text": "reformulate", "tokens": ["reformulate"], "offsets": [46]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [45]}, 
{"text": "multi - layer bidirectional network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "layer", "bidirectional", "network"], "offsets": [55, 56, 57, 58, 59]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [64]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [53]}}, {"event_type": "PUR", "arguments": [{"text": "simultaneously", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["simultaneously"], "offsets": [75]}, {"text": "document - level association of events", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["document", "-", "level", "association", "of", "events"], "offsets": [66, 67, 68, 69, 70, 71]}, {"text": "document - level association of semantic information", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["document", "-", "level", "association", "of", "semantic", "information"], "offsets": [66, 67, 68, 69, 70, 73, 74]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [64]}}, {"event_type": "MDS", "arguments": [{"text": "bidirectional decoder", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["bidirectional", "decoder"], "offsets": [80, 81]}, {"text": "event inter - dependency within a sentence", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["event", "inter", "-", "dependency", "within", "a", "sentence"], "offsets": [87, 88, 89, 90, 91, 92, 93]}, {"text": "when decoding the event tag vector sequence", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "decoding", "the", "event", "tag", "vector", "sequence"], "offsets": [94, 95, 96, 97, 98, 99, 100]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [86]}}, {"event_type": "MDS", "arguments": [{"text": "information aggregation module", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["information", "aggregation", "module"], "offsets": [105, 106, 107]}, 
{"text": "sentence - level semantic", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["sentence", "-", "level", "semantic"], "offsets": [112, 113, 114, 115]}, {"text": "event tag information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["event", "tag", "information"], "offsets": [117, 118, 119]}], "trigger": {"text": "aggregate", "tokens": ["aggregate"], "offsets": [111]}}, {"event_type": "MDS", "arguments": [{"text": "multiple bidirectional decoders", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["multiple", "bidirectional", "decoders"], "offsets": [125, 126, 127]}, {"text": "cross - sentence information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["cross", "-", "sentence", "information"], "offsets": [130, 131, 132, 133]}, {"text": "multi - layer bidirectional tagging architecture", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["multi", "-", "layer", "bidirectional", "tagging", "architecture"], "offsets": [137, 138, 139, 140, 141, 142]}, {"text": "iteratively propagate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["iteratively", "propagate"], "offsets": [144, 145]}], "trigger": {"text": "forming", "tokens": ["forming"], "offsets": [135]}}, {"event_type": "PUR", "arguments": [{"text": "information across sentences", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["information", "across", "sentences"], "offsets": [146, 147, 148]}], "trigger": {"text": "iteratively propagate", "tokens": ["iteratively", "propagate"], "offsets": [144, 145]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [150]}, {"text": "provides", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["provides"], "offsets": [155]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [151]}}, {"event_type": "CMP", "arguments": [{"text": "current state - of - the - art 
results", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [163, 164, 165, 166, 167, 168, 169, 170, 171]}, {"text": "significant improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significant", "improvement"], "offsets": [156, 157]}, {"text": "multi - layer bidirectional network", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["multi", "-", "layer", "bidirectional", "network"], "offsets": [55, 56, 57, 58, 59]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [159]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [155]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "problem of collectively detecting multiple events", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem", "of", "collectively", "detecting", "multiple", "events"], "offsets": [3, 4, 5, 6, 7, 8]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [1]}}, {"event_type": "RWS", "arguments": [{"text": "semantic information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["semantic", "information"], "offsets": [27, 28]}], "trigger": {"text": "encode", "tokens": ["encode"], "offsets": [26]}}, {"event_type": "RWS", "arguments": [{"text": "event inter - dependency", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["event", "inter", "-", "dependency"], "offsets": [31, 32, 33, 34]}, {"text": "at a document - level", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "a", "document", "-", "level"], "offsets": [35, 36, 37, 38, 39]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [30]}}], "document": ["we", "consider", "the", "problem", "of", "collectively", "detecting", "multiple", "events", ",", 
"particularly", "in", "cross", "-", "sentence", "settings", ".", "the", "key", "to", "dealing", "with", "the", "problem", "is", "to", "encode", "semantic", "information", "and", "model", "event", "inter", "-", "dependency", "at", "a", "document", "-", "level", ".", "in", "this", "paper", ",", "we", "reformulate", "it", "as", "a", "seq2seq", "task", "and", "propose", "a", "multi", "-", "layer", "bidirectional", "network", "(", "mlbinet", ")", "to", "capture", "the", "document", "-", "level", "association", "of", "events", "and", "semantic", "information", "simultaneously", ".", "specifically", ",", "a", "bidirectional", "decoder", "is", "firstly", "devised", "to", "model", "event", "inter", "-", "dependency", "within", "a", "sentence", "when", "decoding", "the", "event", "tag", "vector", "sequence", ".", "secondly", ",", "an", "information", "aggregation", "module", "is", "employed", "to", "aggregate", "sentence", "-", "level", "semantic", "and", "event", "tag", "information", ".", "finally", ",", "we", "stack", "multiple", "bidirectional", "decoders", "and", "feed", "cross", "-", "sentence", "information", ",", "forming", "a", "multi", "-", "layer", "bidirectional", "tagging", "architecture", "to", "iteratively", "propagate", "information", "across", "sentences", ".", "we", "show", "that", "our", "approach", "provides", "significant", "improvement", "in", "performance", "compared", "to", "the", "current", "state", "-", "of", "-", "the", "-", "art", "results", "."]}, {"venue": "ACL", "title": "Missing Modality Imagination Network for Emotion Recognition with Uncertain Missing Modalities", "abstract": "Multimodal fusion has been proved to improve emotion recognition performance in previous works. However, in real-world applications, we often encounter the problem of missing modality, and which modalities will be missing is uncertain. It makes the fixed multimodal fusion fail in such cases. 
In this work, we propose a unified model, Missing Modality Imagination Network (MMIN), to deal with the uncertain missing modality problem. MMIN learns robust joint multimodal representations, which can predict the representation of any missing modality given available modalities under different missing modality conditions.Comprehensive experiments on two benchmark datasets demonstrate that the unified MMIN model significantly improves emotion recognition performance under both uncertain missing-modality testing conditions and full-modality ideal testing condition. The code will be available at https://github.com/AIM3-RUC/MMIN.", "doc_id": "eb565b8e7c12be5ed4c640b5c7c3b19d", "publication_year": 2021, "sentences": ["multimodal fusion has been proved to improve emotion recognition performance in previous works .", "however , in real - world applications , we often encounter the problem of missing modality , and which modalities will be missing is uncertain .", "it makes the fixed multimodal fusion fail in such cases .", "in this work , we propose a unified model , missing modality imagination network ( mmin ) , to deal with the uncertain missing modality problem .", "mmin learns robust joint multimodal representations , which can predict the representation of any missing modality given available modalities under different missing modality conditions .", "comprehensive experiments on two benchmark datasets demonstrate that the unified mmin model significantly improves emotion recognition performance under both uncertain missing - modality testing conditions and full - modality ideal testing condition .", "the code will be available at https : / / github . 
com / aim3 - ruc / mmin ."], "events": [{"event_type": "ITT", "arguments": [{"text": "emotion recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["emotion", "recognition"], "offsets": [7, 8]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "in real - world applications", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "real", "-", "world", "applications"], "offsets": [16, 17, 18, 19, 20]}, {"text": "problem of missing modality", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["problem", "of", "missing", "modality"], "offsets": [26, 27, 28, 29]}], "trigger": {"text": "encounter", "tokens": ["encounter"], "offsets": [24]}}, {"event_type": "RWF", "arguments": [{"text": "fixed multimodal fusion fail", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["fixed", "multimodal", "fusion", "fail"], "offsets": [43, 44, 45, 46]}], "trigger": {"text": "makes", "tokens": ["makes"], "offsets": [41]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [55]}, {"text": "deal", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["deal"], "offsets": [70]}, {"text": "missing modality imagination network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["missing", "modality", "imagination", "network"], "offsets": [61, 62, 63, 64]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [56]}}, {"event_type": "PUR", "arguments": [{"text": "uncertain missing modality problem", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["uncertain", "missing", "modality", "problem"], "offsets": [73, 74, 75, 76]}], "trigger": {"text": "deal", "tokens": ["deal"], "offsets": [70]}}, {"event_type": "FIN", "arguments": [{"text": "significantly improves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["significantly", "improves"], 
"offsets": [115, 116]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [109]}}, {"event_type": "FAC", "arguments": [{"text": "emotion recognition performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["emotion", "recognition", "performance"], "offsets": [117, 118, 119]}, {"text": "under both uncertain missing - modality testing conditions and full - modality ideal testing condition", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "both", "uncertain", "missing", "-", "modality", "testing", "conditions", "and", "full", "-", "modality", "ideal", "testing", "condition"], "offsets": [120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134]}, {"text": "unified mmin model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["unified", "missing", "modality", "imagination", "network", "model"], "offsets": [112, 61, 62, 63, 64, 114]}], "trigger": {"text": "significantly improves", "tokens": ["significantly", "improves"], "offsets": [115, 116]}}, {"event_type": "RWF", "arguments": [{"text": "uncertain", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["uncertain"], "offsets": [38]}], "trigger": {"text": "uncertain", "tokens": ["uncertain"], "offsets": [38]}}, {"event_type": "MDS", "arguments": [{"text": "multimodal representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["multimodal", "representations"], "offsets": [82, 83]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [79]}}], "document": ["multimodal", "fusion", "has", "been", "proved", "to", "improve", "emotion", "recognition", "performance", "in", "previous", "works", ".", "however", ",", "in", "real", "-", "world", "applications", ",", "we", "often", "encounter", "the", "problem", "of", "missing", "modality", ",", "and", "which", "modalities", "will", "be", "missing", "is", "uncertain", ".", "it", "makes", "the", "fixed", "multimodal", "fusion", "fail", "in", 
"such", "cases", ".", "in", "this", "work", ",", "we", "propose", "a", "unified", "model", ",", "missing", "modality", "imagination", "network", "(", "mmin", ")", ",", "to", "deal", "with", "the", "uncertain", "missing", "modality", "problem", ".", "mmin", "learns", "robust", "joint", "multimodal", "representations", ",", "which", "can", "predict", "the", "representation", "of", "any", "missing", "modality", "given", "available", "modalities", "under", "different", "missing", "modality", "conditions", ".", "comprehensive", "experiments", "on", "two", "benchmark", "datasets", "demonstrate", "that", "the", "unified", "mmin", "model", "significantly", "improves", "emotion", "recognition", "performance", "under", "both", "uncertain", "missing", "-", "modality", "testing", "conditions", "and", "full", "-", "modality", "ideal", "testing", "condition", ".", "the", "code", "will", "be", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "aim3", "-", "ruc", "/", "mmin", "."]}, {"venue": "ACL", "title": "Automatic Fake News Detection: Are Models Learning to Reason?", "abstract": "Most fact checking models for automatic fake news detection are based on reasoning: given a claim with associated evidence, the models aim to estimate the claim veracity based on the supporting or refuting content within the evidence. When these models perform well, it is generally assumed to be due to the models having learned to reason over the evidence with regards to the claim. In this paper, we investigate this assumption of reasoning, by exploring the relationship and importance of both claim and evidence. Surprisingly, we find on political fact checking datasets that most often the highest effectiveness is obtained by utilizing only the evidence, as the impact of including the claim is either negligible or harmful to the effectiveness. 
This highlights an important problem in what constitutes evidence in existing approaches for automatic fake news detection.", "doc_id": "d8c637731f070f7f09b9cd7c161fd8ef", "publication_year": 2021, "sentences": ["most fact checking models for automatic fake news detection are based on reasoning : given a claim with associated evidence , the models aim to estimate the claim veracity based on the supporting or refuting content within the evidence .", "when these models perform well , it is generally assumed to be due to the models having learned to reason over the evidence with regards to the claim .", "in this paper , we investigate this assumption of reasoning , by exploring the relationship and importance of both claim and evidence .", "surprisingly , we find on political fact checking datasets that most often the highest effectiveness is obtained by utilizing only the evidence , as the impact of including the claim is either negligible or harmful to the effectiveness .", "this highlights an important problem in what constitutes evidence in existing approaches for automatic fake news detection ."], "events": [{"event_type": "ITT", "arguments": [{"text": "automatic fake news detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["automatic", "fake", "news", "detection"], "offsets": [5, 6, 7, 8]}], "trigger": {"text": "reasoning", "tokens": ["reasoning"], "offsets": [12]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [73]}, {"text": "investigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["investigate"], "offsets": [74]}, {"text": "relationship of both claim and evidence", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["relationship", "of", "both", "claim", "and", "evidence"], "offsets": [83, 86, 87, 88, 89, 90]}, {"text": "importance of both claim and evidence", "nugget_type": "TAK", "argument_type": "Content", "tokens": 
["importance", "of", "both", "claim", "and", "evidence"], "offsets": [85, 86, 87, 88, 89, 90]}], "trigger": {"text": "exploring", "tokens": ["exploring"], "offsets": [81]}}, {"event_type": "PUR", "arguments": [{"text": "assumption of reasoning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["assumption", "of", "reasoning"], "offsets": [76, 77, 78]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [74]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [94]}, {"text": "obtained", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["obtained"], "offsets": [108]}, {"text": "negligible or harmful", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["negligible", "or", "harmful"], "offsets": [124, 125, 126]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [95]}}, {"event_type": "FAC", "arguments": [{"text": "evidence", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["evidence"], "offsets": [113]}, {"text": "political fact checking datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["political", "fact", "checking", "datasets"], "offsets": [97, 98, 99, 100]}, {"text": "highest effectiveness", "nugget_type": "STR", "argument_type": "Object", "tokens": ["highest", "effectiveness"], "offsets": [105, 106]}], "trigger": {"text": "obtained", "tokens": ["obtained"], "offsets": [108]}}, {"event_type": "RWS", "arguments": [{"text": "estimate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["estimate"], "offsets": [25]}, {"text": "supporting or refuting content", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["supporting", "or", "refuting", "content"], "offsets": [32, 33, 34, 35]}, {"text": "evidence", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["evidence"], "offsets": [38]}, {"text": "given a claim with associated evidence", "nugget_type": 
"LIM", "argument_type": "Condition", "tokens": ["given", "a", "claim", "with", "associated", "evidence"], "offsets": [14, 15, 16, 17, 18, 19]}, {"text": "most fact checking models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["most", "fact", "checking", "models"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "based", "tokens": ["based"], "offsets": [29]}}, {"event_type": "PUR", "arguments": [{"text": "claim veracity", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["claim", "veracity"], "offsets": [27, 28]}], "trigger": {"text": "estimate", "tokens": ["estimate"], "offsets": [25]}}, {"event_type": "FAC", "arguments": [{"text": "impact of including the claim", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["impact", "of", "including", "the", "claim"], "offsets": [117, 118, 119, 120, 121]}, {"text": "effectiveness", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["effectiveness"], "offsets": [129]}], "trigger": {"text": "negligible or harmful", "tokens": ["negligible", "or", "harmful"], "offsets": [124, 125, 126]}}], "document": ["most", "fact", "checking", "models", "for", "automatic", "fake", "news", "detection", "are", "based", "on", "reasoning", ":", "given", "a", "claim", "with", "associated", "evidence", ",", "the", "models", "aim", "to", "estimate", "the", "claim", "veracity", "based", "on", "the", "supporting", "or", "refuting", "content", "within", "the", "evidence", ".", "when", "these", "models", "perform", "well", ",", "it", "is", "generally", "assumed", "to", "be", "due", "to", "the", "models", "having", "learned", "to", "reason", "over", "the", "evidence", "with", "regards", "to", "the", "claim", ".", "in", "this", "paper", ",", "we", "investigate", "this", "assumption", "of", "reasoning", ",", "by", "exploring", "the", "relationship", "and", "importance", "of", "both", "claim", "and", "evidence", ".", "surprisingly", ",", "we", "find", "on", "political", "fact", "checking", "datasets", "that", 
"most", "often", "the", "highest", "effectiveness", "is", "obtained", "by", "utilizing", "only", "the", "evidence", ",", "as", "the", "impact", "of", "including", "the", "claim", "is", "either", "negligible", "or", "harmful", "to", "the", "effectiveness", ".", "this", "highlights", "an", "important", "problem", "in", "what", "constitutes", "evidence", "in", "existing", "approaches", "for", "automatic", "fake", "news", "detection", "."]}, {"venue": "ACL", "title": "Unsupervised Discovery of Gendered Language through Latent-Variable Modeling", "abstract": "Studying the ways in which language is gendered has long been an area of interest in sociolinguistics. Studies have explored, for example, the speech of male and female characters in film and the language used to describe male and female politicians. In this paper, we aim not to merely study this phenomenon qualitatively, but instead to quantify the degree to which the language used to describe men and women is different and, moreover, different in a positive or negative way. To that end, we introduce a generative latent-variable model that jointly represents adjective (or verb) choice, with its sentiment, given the natural gender of a head (or dependent) noun. 
We find that there are significant differences between descriptions of male and female nouns and that these differences align with common gender stereotypes: Positive adjectives used to describe women are more often related to their bodies than adjectives used to describe men.", "doc_id": "1b3c03e651ad7cb1f61bf66eff06b93a", "publication_year": 2019, "sentences": ["studying the ways in which language is gendered has long been an area of interest in sociolinguistics .", "studies have explored , for example , the speech of male and female characters in film and the language used to describe male and female politicians .", "in this paper , we aim not to merely study this phenomenon qualitatively , but instead to quantify the degree to which the language used to describe men and women is different and , moreover , different in a positive or negative way .", "to that end , we introduce a generative latent - variable model that jointly represents adjective ( or verb ) choice , with its sentiment , given the natural gender of a head ( or dependent ) noun .", "we find that there are significant differences between descriptions of male and female nouns and that these differences align with common gender stereotypes :", "positive adjectives used to describe women are more often related to their bodies than adjectives used to describe men ."], "events": [{"event_type": "ITT", "arguments": [{"text": "in sociolinguistics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "sociolinguistics"], "offsets": [15, 16]}, {"text": "ways in which language is gendered", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["ways", "in", "which", "language", "is", "gendered"], "offsets": [2, 3, 4, 5, 6, 7]}], "trigger": {"text": "interest", "tokens": ["interest"], "offsets": [14]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [49]}, {"text": "ways in which language is gendered", 
"nugget_type": "TAK", "argument_type": "Target", "tokens": ["ways", "in", "which", "language", "is", "gendered"], "offsets": [2, 3, 4, 5, 6, 7]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [54]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [49]}, {"text": "degree to which the language used to describe men and women is different", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["degree", "to", "which", "the", "language", "used", "to", "describe", "men", "and", "women", "is", "different"], "offsets": [64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76]}, {"text": "different in a positive or negative way", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["different", "in", "a", "positive", "or", "negative", "way"], "offsets": [81, 82, 83, 84, 85, 86, 87]}], "trigger": {"text": "quantify", "tokens": ["quantify"], "offsets": [62]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [93]}, {"text": "generative latent - variable model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["generative", "latent", "-", "variable", "model"], "offsets": [96, 97, 98, 99, 100]}, {"text": "jointly represents", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["jointly", "represents"], "offsets": [102, 103]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [94]}}, {"event_type": "PUR", "arguments": [{"text": "adjective ( or verb ) choice", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["adjective", "(", "or", "verb", ")", "choice"], "offsets": [104, 105, 106, 107, 108, 109]}, {"text": "sentiment", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["sentiment"], "offsets": [113]}, {"text": "given the natural gender of a head ( or dependent ) noun", "nugget_type": "LIM", "argument_type": "Condition", "tokens": 
["given", "the", "natural", "gender", "of", "a", "head", "(", "or", "dependent", ")", "noun"], "offsets": [115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126]}], "trigger": {"text": "jointly represents", "tokens": ["jointly", "represents"], "offsets": [102, 103]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [128]}, {"text": "align", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["align"], "offsets": [146]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [129]}}, {"event_type": "FAC", "arguments": [{"text": "common gender stereotypes", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["common", "gender", "stereotypes"], "offsets": [148, 149, 150]}, {"text": "significant differences between descriptions of male and female nouns", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["significant", "differences", "between", "descriptions", "of", "male", "and", "female", "nouns"], "offsets": [133, 134, 135, 136, 137, 138, 139, 140, 141]}], "trigger": {"text": "align", "tokens": ["align"], "offsets": [146]}}, {"event_type": "CMP", "arguments": [{"text": "positive adjectives used to describe women", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["positive", "adjectives", "used", "to", "describe", "women"], "offsets": [152, 153, 154, 155, 156, 157]}, {"text": "adjectives used to describe men", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["adjectives", "used", "to", "describe", "men"], "offsets": [166, 167, 168, 169, 170]}, {"text": "more often", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "often"], "offsets": [159, 160]}], "trigger": {"text": "related", "tokens": ["related"], "offsets": [161]}}], "document": ["studying", "the", "ways", "in", "which", "language", "is", "gendered", "has", "long", "been", "an", "area", "of", "interest", "in", "sociolinguistics", ".", "studies", "have", 
"explored", ",", "for", "example", ",", "the", "speech", "of", "male", "and", "female", "characters", "in", "film", "and", "the", "language", "used", "to", "describe", "male", "and", "female", "politicians", ".", "in", "this", "paper", ",", "we", "aim", "not", "to", "merely", "study", "this", "phenomenon", "qualitatively", ",", "but", "instead", "to", "quantify", "the", "degree", "to", "which", "the", "language", "used", "to", "describe", "men", "and", "women", "is", "different", "and", ",", "moreover", ",", "different", "in", "a", "positive", "or", "negative", "way", ".", "to", "that", "end", ",", "we", "introduce", "a", "generative", "latent", "-", "variable", "model", "that", "jointly", "represents", "adjective", "(", "or", "verb", ")", "choice", ",", "with", "its", "sentiment", ",", "given", "the", "natural", "gender", "of", "a", "head", "(", "or", "dependent", ")", "noun", ".", "we", "find", "that", "there", "are", "significant", "differences", "between", "descriptions", "of", "male", "and", "female", "nouns", "and", "that", "these", "differences", "align", "with", "common", "gender", "stereotypes", ":", "positive", "adjectives", "used", "to", "describe", "women", "are", "more", "often", "related", "to", "their", "bodies", "than", "adjectives", "used", "to", "describe", "men", "."]}, {"venue": "ACL", "title": "Value-Agnostic Conversational Semantic Parsing", "abstract": "Conversational semantic parsers map user utterances to executable programs given dialogue histories composed of previous utterances, programs, and system responses. Existing parsers typically condition on rich representations of history that include the complete set of values and computations previously discussed. We propose a model that abstracts over values to focus prediction on type- and function-level context. This approach provides a compact encoding of dialogue histories and predicted programs, improving generalization and computational efficiency. 
Our model incorporates several other components, including an atomic span copy operation and structural enforcement of well-formedness constraints on predicted programs, that are particularly advantageous in the low-data regime. Trained on the SMCalFlow and TreeDST datasets, our model outperforms prior work by 7.3% and 10.6% respectively in terms of absolute accuracy. Trained on only a thousand examples from each dataset, it outperforms strong baselines by 12.4% and 6.4%. These results indicate that simple representations are key to effective generalization in conversational semantic parsing.", "doc_id": "7c4f66288724b1d9ce1295c187b5e1f4", "publication_year": 2021, "sentences": ["conversational semantic parsers map user utterances to executable programs given dialogue histories composed of previous utterances , programs , and system responses .", "existing parsers typically condition on rich representations of history that include the complete set of values and computations previously discussed .", "we propose a model that abstracts over values to focus prediction on type - and function - level context .", "this approach provides a compact encoding of dialogue histories and predicted programs , improving generalization and computational efficiency .", "our model incorporates several other components , including an atomic span copy operation and structural enforcement of well - formedness constraints on predicted programs , that are particularly advantageous in the low - data regime .", "trained on the smcalflow and treedst datasets , our model outperforms prior work by 7 . 3 % and 10 . 6 % respectively in terms of absolute accuracy .", "trained on only a thousand examples from each dataset , it outperforms strong baselines by 12 . 4 % and 6 . 
4 % .", "these results indicate that simple representations are key to effective generalization in conversational semantic parsing ."], "events": [{"event_type": "RWS", "arguments": [{"text": "existing parsers", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["existing", "parsers"], "offsets": [23, 24]}, {"text": "rich representations of history", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["rich", "representations", "of", "history"], "offsets": [28, 29, 30, 31]}], "trigger": {"text": "condition", "tokens": ["condition"], "offsets": [26]}}, {"event_type": "ITT", "arguments": [{"text": "conversational semantic parsers", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["conversational", "semantic", "parsers"], "offsets": [0, 1, 2]}], "trigger": {"text": "map", "tokens": ["map"], "offsets": [3]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [44]}, {"text": "model that abstracts over values", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model", "that", "abstracts", "over", "values"], "offsets": [47, 48, 49, 50, 51]}, {"text": "focus", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["focus"], "offsets": [53]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [45]}}, {"event_type": "PUR", "arguments": [{"text": "prediction", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["prediction"], "offsets": [54]}, {"text": "on type - and function - level context", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "type", "-", "and", "function", "-", "level", "context"], "offsets": [55, 56, 57, 58, 59, 60, 61, 62]}], "trigger": {"text": "focus", "tokens": ["focus"], "offsets": [53]}}, {"event_type": "WKS", "arguments": [{"text": "compact encoding of dialogue histories", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["compact", "encoding", "of", "dialogue", 
"histories"], "offsets": [68, 69, 70, 71, 72]}, {"text": "predicted programs", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["predicted", "programs"], "offsets": [74, 75]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [66]}}, {"event_type": "MDS", "arguments": [{"text": "atomic span copy operation", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["atomic", "span", "copy", "operation"], "offsets": [92, 93, 94, 95]}, {"text": "structural enforcement of well - formedness constraints", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["structural", "enforcement", "of", "well", "-", "formedness", "constraints"], "offsets": [97, 98, 99, 100, 101, 102, 103]}], "trigger": {"text": "incorporates", "tokens": ["incorporates"], "offsets": [85]}}, {"event_type": "CMP", "arguments": [{"text": "model that abstracts over values", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["model", "that", "abstracts", "over", "values"], "offsets": [47, 48, 49, 50, 51]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [129]}, {"text": "prior work", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["prior", "work"], "offsets": [130, 131]}, {"text": "smcalflow", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["smcalflow"], "offsets": [122]}, {"text": "absolute accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["absolute", "accuracy"], "offsets": [146, 147]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [129]}}, {"event_type": "CMP", "arguments": [{"text": "treedst datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["treedst", "datasets"], "offsets": [124, 125]}, {"text": "model that abstracts over values", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["model", "that", "abstracts", "over", "values"], "offsets": [47, 48, 49, 50, 51]}, {"text": 
"outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [129]}, {"text": "prior work", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["prior", "work"], "offsets": [130, 131]}, {"text": "10 . 6 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["10", ".", "6", "%"], "offsets": [138, 139, 140, 141]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [129]}}, {"event_type": "CMP", "arguments": [{"text": "on only a thousand examples", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "only", "a", "thousand", "examples"], "offsets": [150, 151, 152, 153, 154]}, {"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [161, 162]}, {"text": "12 . 4 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["12", ".", "4", "%"], "offsets": [164, 165, 166, 167]}, {"text": "smcalflow", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["smcalflow"], "offsets": [122]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [160]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [160]}}, {"event_type": "CMP", "arguments": [{"text": "on only a thousand examples", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "only", "a", "thousand", "examples"], "offsets": [150, 151, 152, 153, 154]}, {"text": "treedst datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["treedst", "datasets"], "offsets": [124, 125]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [160]}, {"text": "model that abstracts over values", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["model", "that", "abstracts", "over", "values"], "offsets": [47, 48, 49, 50, 51]}, {"text": "strong baselines", "nugget_type": "APP", "argument_type": 
"Arg2", "tokens": ["strong", "baselines"], "offsets": [161, 162]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [160]}}, {"event_type": "FIN", "arguments": [{"text": "key", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["key"], "offsets": [181]}], "trigger": {"text": "indicate", "tokens": ["indicate"], "offsets": [176]}}, {"event_type": "FAC", "arguments": [{"text": "simple representations", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["simple", "representations"], "offsets": [178, 179]}, {"text": "effective generalization in conversational semantic parsing", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["effective", "generalization", "in", "conversational", "semantic", "parsing"], "offsets": [183, 184, 185, 186, 187, 188]}], "trigger": {"text": "key", "tokens": ["key"], "offsets": [181]}}], "document": ["conversational", "semantic", "parsers", "map", "user", "utterances", "to", "executable", "programs", "given", "dialogue", "histories", "composed", "of", "previous", "utterances", ",", "programs", ",", "and", "system", "responses", ".", "existing", "parsers", "typically", "condition", "on", "rich", "representations", "of", "history", "that", "include", "the", "complete", "set", "of", "values", "and", "computations", "previously", "discussed", ".", "we", "propose", "a", "model", "that", "abstracts", "over", "values", "to", "focus", "prediction", "on", "type", "-", "and", "function", "-", "level", "context", ".", "this", "approach", "provides", "a", "compact", "encoding", "of", "dialogue", "histories", "and", "predicted", "programs", ",", "improving", "generalization", "and", "computational", "efficiency", ".", "our", "model", "incorporates", "several", "other", "components", ",", "including", "an", "atomic", "span", "copy", "operation", "and", "structural", "enforcement", "of", "well", "-", "formedness", "constraints", "on", "predicted", "programs", ",", "that", "are", "particularly", 
"advantageous", "in", "the", "low", "-", "data", "regime", ".", "trained", "on", "the", "smcalflow", "and", "treedst", "datasets", ",", "our", "model", "outperforms", "prior", "work", "by", "7", ".", "3", "%", "and", "10", ".", "6", "%", "respectively", "in", "terms", "of", "absolute", "accuracy", ".", "trained", "on", "only", "a", "thousand", "examples", "from", "each", "dataset", ",", "it", "outperforms", "strong", "baselines", "by", "12", ".", "4", "%", "and", "6", ".", "4", "%", ".", "these", "results", "indicate", "that", "simple", "representations", "are", "key", "to", "effective", "generalization", "in", "conversational", "semantic", "parsing", "."]}, {"venue": "ACL", "title": "Estimating the Entropy of Linguistic Distributions", "abstract": "Shannon entropy is often a quantity of interest to linguists studying the communicative capacity of human language. However, entropymust typically be estimated from observed data because researchers do not have access to the underlying probability distribution. While entropy estimation is a well-studied problem in other fields, there is not yet a comprehensive exploration of the efficacy of entropy estimators for use with linguistic data. In this work, we fill this void, studying the empirical effectiveness of different entropy estimators for linguistic distributions. In a replication of two recent information-theoretic linguistic studies, we find evidence that the reported effect size is over-estimated due to over-reliance on poor entropy estimators. 
We end this paper with a concrete recommendation for the entropy estimators that should be used in future linguistic studies.", "doc_id": "ed42d72eb78e70adb58948a6346d6aa7", "publication_year": 2022, "sentences": ["shannon entropy is often a quantity of interest to linguists studying the communicative capacity of human language .", "however , entropymust typically be estimated from observed data because researchers do not have access to the underlying probability distribution .", "while entropy estimation is a well - studied problem in other fields , there is not yet a comprehensive exploration of the efficacy of entropy estimators for use with linguistic data .", "in this work , we fill this void , studying the empirical effectiveness of different entropy estimators for linguistic distributions .", "in a replication of two recent information - theoretic linguistic studies , we find evidence that the reported effect size is over - estimated due to over - reliance on poor entropy estimators .", "we end this paper with a concrete recommendation for the entropy estimators that should be used in future linguistic studies ."], "events": [{"event_type": "ITT", "arguments": [{"text": "shannon entropy", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["shannon", "entropy"], "offsets": [0, 1]}], "trigger": {"text": "quantity of interest", "tokens": ["quantity", "of", "interest"], "offsets": [5, 6, 7]}}, {"event_type": "RWF", "arguments": [{"text": "not have access", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "have", "access"], "offsets": [30, 31, 32]}], "trigger": {"text": "not have access", "tokens": ["not", "have", "access"], "offsets": [30, 31, 32]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [75]}, {"text": "efficacy of entropy estimators for use with linguistic data", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["efficacy", "of", 
"entropy", "estimators", "for", "use", "with", "linguistic", "data"], "offsets": [61, 62, 63, 64, 65, 66, 67, 68, 69]}], "trigger": {"text": "fill", "tokens": ["fill"], "offsets": [76]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [75]}, {"text": "empirical effectiveness of different entropy estimators for linguistic distributions", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["empirical", "effectiveness", "of", "different", "entropy", "estimators", "for", "linguistic", "distributions"], "offsets": [82, 83, 84, 85, 86, 87, 88, 89, 90]}], "trigger": {"text": "studying", "tokens": ["studying"], "offsets": [80]}}, {"event_type": "RWF", "arguments": [{"text": "reported effect size", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["reported", "effect", "size"], "offsets": [109, 110, 111]}], "trigger": {"text": "over - estimated", "tokens": ["over", "-", "estimated"], "offsets": [113, 114, 115]}}, {"event_type": "RWF", "arguments": [{"text": "over - reliance", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["over", "-", "reliance"], "offsets": [118, 119, 120]}], "trigger": {"text": "over - reliance", "tokens": ["over", "-", "reliance"], "offsets": [118, 119, 120]}}], "document": ["shannon", "entropy", "is", "often", "a", "quantity", "of", "interest", "to", "linguists", "studying", "the", "communicative", "capacity", "of", "human", "language", ".", "however", ",", "entropymust", "typically", "be", "estimated", "from", "observed", "data", "because", "researchers", "do", "not", "have", "access", "to", "the", "underlying", "probability", "distribution", ".", "while", "entropy", "estimation", "is", "a", "well", "-", "studied", "problem", "in", "other", "fields", ",", "there", "is", "not", "yet", "a", "comprehensive", "exploration", "of", "the", "efficacy", "of", "entropy", "estimators", "for", "use", "with", "linguistic", "data", ".", "in", "this", 
"work", ",", "we", "fill", "this", "void", ",", "studying", "the", "empirical", "effectiveness", "of", "different", "entropy", "estimators", "for", "linguistic", "distributions", ".", "in", "a", "replication", "of", "two", "recent", "information", "-", "theoretic", "linguistic", "studies", ",", "we", "find", "evidence", "that", "the", "reported", "effect", "size", "is", "over", "-", "estimated", "due", "to", "over", "-", "reliance", "on", "poor", "entropy", "estimators", ".", "we", "end", "this", "paper", "with", "a", "concrete", "recommendation", "for", "the", "entropy", "estimators", "that", "should", "be", "used", "in", "future", "linguistic", "studies", "."]}, {"venue": "ACL", "title": "On Efficiently Acquiring Annotations for Multilingual Models", "abstract": "When tasked with supporting multiple languages for a given problem, two approaches have arisen: training a model for each language with the annotation budget divided equally among them, and training on a high-resource language followed by zero-shot transfer to the remaining languages. In this work, we show that the strategy of joint learning across multiple languages using a single model performs substantially better than the aforementioned alternatives. We also demonstrate that active learning provides additional, complementary benefits. We show that this simple approach enables the model to be data efficient by allowing it to arbitrate its annotation budget to query languages it is less certain on. We illustrate the effectiveness of our proposed method on a diverse set of tasks: a classification task with 4 languages, a sequence tagging task with 4 languages and a dependency parsing task with 5 languages. 
Our proposed method, whilst simple, substantially outperforms the other viable alternatives for building a model in a multilingual setting under constrained budgets.", "doc_id": "fcf23b115e2cdaef507052fd8a6be1cf", "publication_year": 2022, "sentences": ["when tasked with supporting multiple languages for a given problem , two approaches have arisen : training a model for each language with the annotation budget divided equally among them , and training on a high - resource language followed by zero - shot transfer to the remaining languages .", "in this work , we show that the strategy of joint learning across multiple languages using a single model performs substantially better than the aforementioned alternatives .", "we also demonstrate that active learning provides additional , complementary benefits .", "we show that this simple approach enables the model to be data efficient by allowing it to arbitrate its annotation budget to query languages it is less certain on .", "we illustrate the effectiveness of our proposed method on a diverse set of tasks : a classification task with 4 languages , a sequence tagging task with 4 languages and a dependency parsing task with 5 languages .", "our proposed method , whilst simple , substantially outperforms the other viable alternatives for building a model in a multilingual setting under constrained budgets ."], "events": [{"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [54]}, {"text": "performs", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["performs"], "offsets": [69]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [55]}}, {"event_type": "CMP", "arguments": [{"text": "strategy of joint learning across multiple languages using a single model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["strategy", "of", "joint", "learning", "across", "multiple", "languages", "using", "a", "single", "model"], 
"offsets": [58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68]}, {"text": "training on a high - resource language", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["training", "on", "a", "high", "-", "resource", "language"], "offsets": [32, 33, 34, 35, 36, 37, 38]}, {"text": "substantially better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["substantially", "better"], "offsets": [70, 71]}, {"text": "training a model for each language", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["training", "a", "model", "for", "each", "language"], "offsets": [16, 17, 18, 19, 20, 21]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [69]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [77]}, {"text": "provides", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["provides"], "offsets": [83]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [79]}}, {"event_type": "FAC", "arguments": [{"text": "active learning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["active", "learning"], "offsets": [81, 82]}, {"text": "additional , complementary benefits", "nugget_type": "STR", "argument_type": "Object", "tokens": ["additional", ",", "complementary", "benefits"], "offsets": [84, 85, 86, 87]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [83]}}, {"event_type": "FAC", "arguments": [{"text": "effectiveness", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness"], "offsets": [122]}, {"text": "on a diverse set of tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "diverse", "set", "of", "tasks"], "offsets": [127, 128, 129, 130, 131, 132]}], "trigger": {"text": "illustrate", "tokens": ["illustrate"], "offsets": [120]}}, {"event_type": "CMP", "arguments": [{"text": "method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": 
["method"], "offsets": [159]}, {"text": "other viable alternatives", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["other", "viable", "alternatives"], "offsets": [167, 168, 169]}, {"text": "substantially outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["substantially", "outperforms"], "offsets": [164, 165]}], "trigger": {"text": "substantially outperforms", "tokens": ["substantially", "outperforms"], "offsets": [164, 165]}}, {"event_type": "MDS", "arguments": [{"text": "enables", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enables"], "offsets": [95]}, {"text": "query languages", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["query", "languages"], "offsets": [111, 112]}, {"text": "annotation budget", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["annotation", "budget"], "offsets": [108, 109]}], "trigger": {"text": "arbitrate", "tokens": ["arbitrate"], "offsets": [106]}}, {"event_type": "PUR", "arguments": [{"text": "model to be data efficient", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["model", "to", "be", "data", "efficient"], "offsets": [97, 98, 99, 100, 101]}], "trigger": {"text": "enables", "tokens": ["enables"], "offsets": [95]}}], "document": ["when", "tasked", "with", "supporting", "multiple", "languages", "for", "a", "given", "problem", ",", "two", "approaches", "have", "arisen", ":", "training", "a", "model", "for", "each", "language", "with", "the", "annotation", "budget", "divided", "equally", "among", "them", ",", "and", "training", "on", "a", "high", "-", "resource", "language", "followed", "by", "zero", "-", "shot", "transfer", "to", "the", "remaining", "languages", ".", "in", "this", "work", ",", "we", "show", "that", "the", "strategy", "of", "joint", "learning", "across", "multiple", "languages", "using", "a", "single", "model", "performs", "substantially", "better", "than", "the", "aforementioned", "alternatives", ".", "we", "also", 
"demonstrate", "that", "active", "learning", "provides", "additional", ",", "complementary", "benefits", ".", "we", "show", "that", "this", "simple", "approach", "enables", "the", "model", "to", "be", "data", "efficient", "by", "allowing", "it", "to", "arbitrate", "its", "annotation", "budget", "to", "query", "languages", "it", "is", "less", "certain", "on", ".", "we", "illustrate", "the", "effectiveness", "of", "our", "proposed", "method", "on", "a", "diverse", "set", "of", "tasks", ":", "a", "classification", "task", "with", "4", "languages", ",", "a", "sequence", "tagging", "task", "with", "4", "languages", "and", "a", "dependency", "parsing", "task", "with", "5", "languages", ".", "our", "proposed", "method", ",", "whilst", "simple", ",", "substantially", "outperforms", "the", "other", "viable", "alternatives", "for", "building", "a", "model", "in", "a", "multilingual", "setting", "under", "constrained", "budgets", "."]}, {"venue": "ACL", "title": "Investigating Word-Class Distributions in Word Vector Spaces", "abstract": "This paper presents an investigation on the distribution of word vectors belonging to a certain word class in a pre-trained word vector space. To this end, we made several assumptions about the distribution, modeled the distribution accordingly, and validated each assumption by comparing the goodness of each model. Specifically, we considered two types of word classes \u2013 the semantic class of direct objects of a verb and the semantic class in a thesaurus \u2013 and tried to build models that properly estimate how likely it is that a word in the vector space is a member of a given word class. Our results on selectional preference and WordNet datasets show that the centroid-based model will fail to achieve good enough performance, the geometry of the distribution and the existence of subgroups will have limited impact, and also the negative instances need to be considered for adequate modeling of the distribution. 
We further investigated the relationship between the scores calculated by each model and the degree of membership and found that discriminative learning-based models are best in finding the boundaries of a class, while models based on the offset between positive and negative instances perform best in determining the degree of membership.", "doc_id": "4536d956f8b2a1263abc569f5ff88b62", "publication_year": 2020, "sentences": ["this paper presents an investigation on the distribution of word vectors belonging to a certain word class in a pre - trained word vector space .", "to this end , we made several assumptions about the distribution , modeled the distribution accordingly , and validated each assumption by comparing the goodness of each model .", "specifically , we considered two types of word classes \u2013 the semantic class of direct objects of a verb and the semantic class in a thesaurus \u2013 and tried to build models that properly estimate how likely it is that a word in the vector space is a member of a given word class .", "our results on selectional preference and wordnet datasets show that the centroid - based model will fail to achieve good enough performance , the geometry of the distribution and the existence of subgroups will have limited impact , and also the negative instances need to be considered for adequate modeling of the distribution .", "we further investigated the relationship between the scores calculated by each model and the degree of membership and found that discriminative learning - based models are best in finding the boundaries of a class , while models based on the offset between positive and negative instances perform best in determining the degree of membership ."], "events": [{"event_type": "WKS", "arguments": [{"text": "investigation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["investigation"], "offsets": [4]}, {"text": "distribution of word vectors belonging to a certain word class", "nugget_type": "TAK", 
"argument_type": "Target", "tokens": ["distribution", "of", "word", "vectors", "belonging", "to", "a", "certain", "word", "class"], "offsets": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}, {"text": "in a pre - trained word vector space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "pre", "-", "trained", "word", "vector", "space"], "offsets": [17, 18, 19, 20, 21, 22, 23, 24]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [2]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [30]}, {"text": "several assumptions", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["several", "assumptions"], "offsets": [32, 33]}, {"text": "distribution", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["distribution"], "offsets": [36]}], "trigger": {"text": "made", "tokens": ["made"], "offsets": [31]}}, {"event_type": "WKS", "arguments": [{"text": "distribution", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["distribution"], "offsets": [40]}], "trigger": {"text": "modeled", "tokens": ["modeled"], "offsets": [38]}}, {"event_type": "WKS", "arguments": [{"text": "goodness of each model", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["goodness", "of", "each", "model"], "offsets": [50, 51, 52, 53]}, {"text": "validated", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["validated"], "offsets": [44]}], "trigger": {"text": "comparing", "tokens": ["comparing"], "offsets": [48]}}, {"event_type": "PUR", "arguments": [{"text": "each assumption", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["each", "assumption"], "offsets": [45, 46]}], "trigger": {"text": "validated", "tokens": ["validated"], "offsets": [44]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [57]}, {"text": "in a thesaurus", "nugget_type": 
"LIM", "argument_type": "Condition", "tokens": ["in", "a", "thesaurus"], "offsets": [78, 79, 80]}, {"text": "semantic class of direct objects of a verb", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["semantic", "class", "of", "direct", "objects", "of", "a", "verb"], "offsets": [66, 67, 68, 69, 70, 71, 72, 73]}, {"text": "semantic class", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["semantic", "class"], "offsets": [76, 77]}], "trigger": {"text": "considered", "tokens": ["considered"], "offsets": [58]}}, {"event_type": "WKS", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["models"], "offsets": [86]}, {"text": "properly estimate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["properly", "estimate"], "offsets": [88, 89]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [85]}}, {"event_type": "PUR", "arguments": [{"text": "member of a given word class", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["member", "of", "a", "given", "word", "class"], "offsets": [103, 104, 105, 106, 107, 108]}], "trigger": {"text": "properly estimate", "tokens": ["properly", "estimate"], "offsets": [88, 89]}}, {"event_type": "FIN", "arguments": [{"text": "fail to achieve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["fail", "to", "achieve"], "offsets": [126, 127, 128]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [118]}}, {"event_type": "FAC", "arguments": [{"text": "wordnet datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["wordnet", "datasets"], "offsets": [116, 117]}, {"text": "centroid - based model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["centroid", "-", "based", "model"], "offsets": [121, 122, 123, 124]}, {"text": "good enough", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["good", "enough"], "offsets": [129, 130]}, {"text": "performance", "nugget_type": "TAK", 
"argument_type": "Object", "tokens": ["performance"], "offsets": [131]}], "trigger": {"text": "fail to achieve", "tokens": ["fail", "to", "achieve"], "offsets": [126, 127, 128]}}, {"event_type": "FIN", "arguments": [{"text": "have", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["have"], "offsets": [144]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [118]}}, {"event_type": "FAC", "arguments": [{"text": "geometry of the distribution", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["geometry", "of", "the", "distribution"], "offsets": [134, 135, 136, 137]}, {"text": "existence of subgroups", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["existence", "of", "subgroups"], "offsets": [140, 141, 142]}, {"text": "limited impact", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["limited", "impact"], "offsets": [145, 146]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [144]}}, {"event_type": "WKS", "arguments": [{"text": "negative instances", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["negative", "instances"], "offsets": [151, 152]}, {"text": "adequate modeling of the distribution", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["adequate", "modeling", "of", "the", "distribution"], "offsets": [158, 159, 160, 161, 162]}], "trigger": {"text": "considered", "tokens": ["considered"], "offsets": [156]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [164]}, {"text": "relationship", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["relationship"], "offsets": [168]}, {"text": "between the scores calculated by each model and the degree of membership", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "the", "scores", "calculated", "by", "each", "model", "and", "the", "degree", "of", "membership"], "offsets": [169, 170, 171, 172, 173, 174, 175, 
176, 177, 178, 179, 180]}], "trigger": {"text": "investigated", "tokens": ["investigated"], "offsets": [166]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [164]}, {"text": "best", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["best"], "offsets": [190]}], "trigger": {"text": "found", "tokens": ["found"], "offsets": [182]}}, {"event_type": "FAC", "arguments": [{"text": "discriminative learning - based models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["discriminative", "learning", "-", "based", "models"], "offsets": [184, 185, 186, 187, 188]}, {"text": "finding the boundaries of a class", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["finding", "the", "boundaries", "of", "a", "class"], "offsets": [192, 193, 194, 195, 196, 197]}], "trigger": {"text": "best", "tokens": ["best"], "offsets": [190]}}, {"event_type": "FIN", "arguments": [{"text": "perform", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["perform"], "offsets": [210]}], "trigger": {"text": "found", "tokens": ["found"], "offsets": [182]}}, {"event_type": "FAC", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["models"], "offsets": [200]}, {"text": "based on the offset between positive and negative instances", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "the", "offset", "between", "positive", "and", "negative", "instances"], "offsets": [201, 202, 203, 204, 205, 206, 207, 208, 209]}, {"text": "best", "nugget_type": "STR", "argument_type": "Object", "tokens": ["best"], "offsets": [211]}, {"text": "determining the degree of membership", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["determining", "the", "degree", "of", "membership"], "offsets": [213, 214, 215, 216, 217]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [210]}}], "document": ["this", 
"paper", "presents", "an", "investigation", "on", "the", "distribution", "of", "word", "vectors", "belonging", "to", "a", "certain", "word", "class", "in", "a", "pre", "-", "trained", "word", "vector", "space", ".", "to", "this", "end", ",", "we", "made", "several", "assumptions", "about", "the", "distribution", ",", "modeled", "the", "distribution", "accordingly", ",", "and", "validated", "each", "assumption", "by", "comparing", "the", "goodness", "of", "each", "model", ".", "specifically", ",", "we", "considered", "two", "types", "of", "word", "classes", "\u2013", "the", "semantic", "class", "of", "direct", "objects", "of", "a", "verb", "and", "the", "semantic", "class", "in", "a", "thesaurus", "\u2013", "and", "tried", "to", "build", "models", "that", "properly", "estimate", "how", "likely", "it", "is", "that", "a", "word", "in", "the", "vector", "space", "is", "a", "member", "of", "a", "given", "word", "class", ".", "our", "results", "on", "selectional", "preference", "and", "wordnet", "datasets", "show", "that", "the", "centroid", "-", "based", "model", "will", "fail", "to", "achieve", "good", "enough", "performance", ",", "the", "geometry", "of", "the", "distribution", "and", "the", "existence", "of", "subgroups", "will", "have", "limited", "impact", ",", "and", "also", "the", "negative", "instances", "need", "to", "be", "considered", "for", "adequate", "modeling", "of", "the", "distribution", ".", "we", "further", "investigated", "the", "relationship", "between", "the", "scores", "calculated", "by", "each", "model", "and", "the", "degree", "of", "membership", "and", "found", "that", "discriminative", "learning", "-", "based", "models", "are", "best", "in", "finding", "the", "boundaries", "of", "a", "class", ",", "while", "models", "based", "on", "the", "offset", "between", "positive", "and", "negative", "instances", "perform", "best", "in", "determining", "the", "degree", "of", "membership", "."]}, {"venue": "ACL", "title": "Online Learning Meets Machine 
Translation Evaluation: Finding the Best Systems with the Least Human Effort", "abstract": "In Machine Translation, assessing the quality of a large amount of automatic translations can be challenging. Automatic metrics are not reliable when it comes to high performing systems. In addition, resorting to human evaluators can be expensive, especially when evaluating multiple systems. To overcome the latter challenge, we propose a novel application of online learning that, given an ensemble of Machine Translation systems, dynamically converges to the best systems, by taking advantage of the human feedback available. Our experiments on WMT\u201919 datasets show that our online approach quickly converges to the top-3 ranked systems for the language pairs considered, despite the lack of human feedback for many translations.", "doc_id": "946a6233c1b19f49558cc802efb7e163", "publication_year": 2021, "sentences": ["in machine translation , assessing the quality of a large amount of automatic translations can be challenging .", "automatic metrics are not reliable when it comes to high performing systems .", "in addition , resorting to human evaluators can be expensive , especially when evaluating multiple systems .", "to overcome the latter challenge , we propose a novel application of online learning that , given an ensemble of machine translation systems , dynamically converges to the best systems , by taking advantage of the human feedback available .", "our experiments on wmt \u2019 19 datasets show that our online approach quickly converges to the top - 3 ranked systems for the language pairs considered , despite the lack of human feedback for many translations ."], "events": [{"event_type": "ITT", "arguments": [{"text": "machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["machine", "translation"], "offsets": [1, 2]}], "trigger": {"text": "assessing", "tokens": ["assessing"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": 
"automatic metrics", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["automatic", "metrics"], "offsets": [18, 19]}, {"text": "not reliable", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "reliable"], "offsets": [21, 22]}], "trigger": {"text": "not reliable", "tokens": ["not", "reliable"], "offsets": [21, 22]}}, {"event_type": "RWF", "arguments": [{"text": "expensive", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["expensive"], "offsets": [40]}], "trigger": {"text": "expensive", "tokens": ["expensive"], "offsets": [40]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [54]}, {"text": "application of online learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["application", "of", "online", "learning"], "offsets": [58, 59, 60, 61]}, {"text": "given", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["given"], "offsets": [64]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [55]}}, {"event_type": "PUR", "arguments": [{"text": "ensemble of machine translation systems", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["ensemble", "of", "machine", "translation", "systems"], "offsets": [66, 67, 68, 69, 70]}], "trigger": {"text": "given", "tokens": ["given"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "advantage of the human feedback available", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["advantage", "of", "the", "human", "feedback", "available"], "offsets": [81, 82, 83, 84, 85, 86]}, {"text": "dynamically converges", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["dynamically", "converges"], "offsets": [72, 73]}], "trigger": {"text": "taking", "tokens": ["taking"], "offsets": [80]}}, {"event_type": "PUR", "arguments": [{"text": "best systems", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["best", "systems"], "offsets": [76, 
77]}], "trigger": {"text": "dynamically converges", "tokens": ["dynamically", "converges"], "offsets": [72, 73]}}, {"event_type": "FIN", "arguments": [{"text": "converges", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["converges"], "offsets": [101]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [95]}}, {"event_type": "FAC", "arguments": [{"text": "despite the lack of human feedback for many translations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["despite", "the", "lack", "of", "human", "feedback", "for", "many", "translations"], "offsets": [115, 116, 117, 118, 119, 120, 121, 122, 123]}, {"text": "application of online learning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["application", "of", "online", "learning"], "offsets": [58, 59, 60, 61]}, {"text": "top - 3 ranked systems", "nugget_type": "STR", "argument_type": "Object", "tokens": ["top", "-", "3", "ranked", "systems"], "offsets": [104, 105, 106, 107, 108]}, {"text": "language pairs considered", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "pairs", "considered"], "offsets": [111, 112, 113]}, {"text": "quickly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["quickly"], "offsets": [100]}], "trigger": {"text": "converges", "tokens": ["converges"], "offsets": [101]}}], "document": ["in", "machine", "translation", ",", "assessing", "the", "quality", "of", "a", "large", "amount", "of", "automatic", "translations", "can", "be", "challenging", ".", "automatic", "metrics", "are", "not", "reliable", "when", "it", "comes", "to", "high", "performing", "systems", ".", "in", "addition", ",", "resorting", "to", "human", "evaluators", "can", "be", "expensive", ",", "especially", "when", "evaluating", "multiple", "systems", ".", "to", "overcome", "the", "latter", "challenge", ",", "we", "propose", "a", "novel", "application", "of", "online", "learning", "that", ",", "given", "an", "ensemble", "of", "machine", 
"translation", "systems", ",", "dynamically", "converges", "to", "the", "best", "systems", ",", "by", "taking", "advantage", "of", "the", "human", "feedback", "available", ".", "our", "experiments", "on", "wmt", "\u2019", "19", "datasets", "show", "that", "our", "online", "approach", "quickly", "converges", "to", "the", "top", "-", "3", "ranked", "systems", "for", "the", "language", "pairs", "considered", ",", "despite", "the", "lack", "of", "human", "feedback", "for", "many", "translations", "."]}, {"venue": "ACL", "title": "A Simple and Effective Unified Encoder for Document-Level Machine Translation", "abstract": "Most of the existing models for document-level machine translation adopt dual-encoder structures. The representation of the source sentences and the document-level contexts are modeled with two separate encoders. Although these models can make use of the document-level contexts, they do not fully model the interaction between the contexts and the source sentences, and can not directly adapt to the recent pre-training models (e.g., BERT) which encodes multiple sentences with a single encoder. In this work, we propose a simple and effective unified encoder that can outperform the baseline models of dual-encoder models in terms of BLEU and METEOR scores. Moreover, the pre-training models can further boost the performance of our proposed model.", "doc_id": "db27f9c45b8917eb1e3f5e17d6db5458", "publication_year": 2020, "sentences": ["most of the existing models for document - level machine translation adopt dual - encoder structures .", "the representation of the source sentences and the document - level contexts are modeled with two separate encoders .", "although these models can make use of the document - level contexts , they do not fully model the interaction between the contexts and the source sentences , and can not directly adapt to the recent pre - training models ( e . g . 
, bert ) which encodes multiple sentences with a single encoder .", "in this work , we propose a simple and effective unified encoder that can outperform the baseline models of dual - encoder models in terms of bleu and meteor scores .", "moreover , the pre - training models can further boost the performance of our proposed model ."], "events": [{"event_type": "RWS", "arguments": [{"text": "most of the existing models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["most", "of", "the", "existing", "models"], "offsets": [0, 1, 2, 3, 4]}, {"text": "document - level machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["document", "-", "level", "machine", "translation"], "offsets": [6, 7, 8, 9, 10]}, {"text": "dual - encoder structures", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["dual", "-", "encoder", "structures"], "offsets": [12, 13, 14, 15]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [11]}}, {"event_type": "RWS", "arguments": [{"text": "two separate encoders", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["two", "separate", "encoders"], "offsets": [32, 33, 34]}, {"text": "document - level contexts", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["document", "-", "level", "contexts"], "offsets": [25, 26, 27, 28]}, {"text": "representation of the source sentences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["representation", "of", "the", "source", "sentences"], "offsets": [18, 19, 20, 21, 22]}], "trigger": {"text": "modeled", "tokens": ["modeled"], "offsets": [30]}}, {"event_type": "RWF", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["models"], "offsets": [38]}, {"text": "do not fully model", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["do", "not", "fully", "model"], "offsets": [50, 51, 52, 53]}], "trigger": {"text": "do not fully model", "tokens": 
["do", "not", "fully", "model"], "offsets": [50, 51, 52, 53]}}, {"event_type": "RWF", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["models"], "offsets": [38]}, {"text": "can not directly adapt", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["can", "not", "directly", "adapt"], "offsets": [65, 66, 67, 68]}], "trigger": {"text": "can not directly adapt", "tokens": ["can", "not", "directly", "adapt"], "offsets": [65, 66, 67, 68]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [97]}, {"text": "unified encoder", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["unified", "encoder"], "offsets": [103, 104]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [98]}}, {"event_type": "CMP", "arguments": [{"text": "unified encoder", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["unified", "encoder"], "offsets": [103, 104]}, {"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [107]}, {"text": "baseline models of dual - encoder models", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["baseline", "models", "of", "dual", "-", "encoder", "models"], "offsets": [109, 110, 111, 112, 113, 114, 115]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [107]}}, {"event_type": "FAC", "arguments": [{"text": "pre - training models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pre", "-", "training", "models"], "offsets": [127, 128, 129, 130]}, {"text": "performance of our proposed model", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "of", "unified", "encoder"], "offsets": [135, 136, 103, 104]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [133]}}], "document": ["most", "of", "the", "existing", "models", "for", "document", "-", "level", "machine", 
"translation", "adopt", "dual", "-", "encoder", "structures", ".", "the", "representation", "of", "the", "source", "sentences", "and", "the", "document", "-", "level", "contexts", "are", "modeled", "with", "two", "separate", "encoders", ".", "although", "these", "models", "can", "make", "use", "of", "the", "document", "-", "level", "contexts", ",", "they", "do", "not", "fully", "model", "the", "interaction", "between", "the", "contexts", "and", "the", "source", "sentences", ",", "and", "can", "not", "directly", "adapt", "to", "the", "recent", "pre", "-", "training", "models", "(", "e", ".", "g", ".", ",", "bert", ")", "which", "encodes", "multiple", "sentences", "with", "a", "single", "encoder", ".", "in", "this", "work", ",", "we", "propose", "a", "simple", "and", "effective", "unified", "encoder", "that", "can", "outperform", "the", "baseline", "models", "of", "dual", "-", "encoder", "models", "in", "terms", "of", "bleu", "and", "meteor", "scores", ".", "moreover", ",", "the", "pre", "-", "training", "models", "can", "further", "boost", "the", "performance", "of", "our", "proposed", "model", "."]}, {"venue": "ACL", "title": "On the Robustness of Offensive Language Classifiers", "abstract": "Social media platforms are deploying machine learning based offensive language classification systems to combat hateful, racist, and other forms of offensive speech at scale. However, despite their real-world deployment, we do not yet comprehensively understand the extent to which offensive language classifiers are robust against adversarial attacks. Prior work in this space is limited to studying robustness of offensive language classifiers against primitive attacks such as misspellings and extraneous spaces. To address this gap, we systematically analyze the robustness of state-of-the-art offensive language classifiers against more crafty adversarial attacks that leverage greedy- and attention-based word selection and context-aware embeddings for word replacement. 
Our results on multiple datasets show that these crafty adversarial attacks can degrade the accuracy of offensive language classifiers by more than 50% while also being able to preserve the readability and meaning of the modified text.", "doc_id": "54824117f7581a0ae7c67626ee60b5c3", "publication_year": 2022, "sentences": ["social media platforms are deploying machine learning based offensive language classification systems to combat hateful , racist , and other forms of offensive speech at scale .", "however , despite their real - world deployment , we do not yet comprehensively understand the extent to which offensive language classifiers are robust against adversarial attacks .", "prior work in this space is limited to studying robustness of offensive language classifiers against primitive attacks such as misspellings and extraneous spaces .", "to address this gap , we systematically analyze the robustness of state - of - the - art offensive language classifiers against more crafty adversarial attacks that leverage greedy - and attention - based word selection and context - aware embeddings for word replacement .", "our results on multiple datasets show that these crafty adversarial attacks can degrade the accuracy of offensive language classifiers by more than 50 % while also being able to preserve the readability and meaning of the modified text ."], "events": [{"event_type": "ITT", "arguments": [{"text": "machine learning based offensive language classification systems", "nugget_type": "APP", "argument_type": "Target", "tokens": ["machine", "learning", "based", "offensive", "language", "classification", "systems"], "offsets": [5, 6, 7, 8, 9, 10, 11]}], "trigger": {"text": "combat", "tokens": ["combat"], "offsets": [13]}}, {"event_type": "RWF", "arguments": [{"text": "limited", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limited"], "offsets": [61]}], "trigger": {"text": "limited", "tokens": ["limited"], "offsets": [61]}}, {"event_type": "WKS", 
"arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [84]}, {"text": "robustness of state - of - the - art offensive language classifiers", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["robustness", "of", "state", "-", "of", "-", "the", "-", "art", "offensive", "language", "classifiers"], "offsets": [88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]}, {"text": "against more crafty adversarial attacks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["against", "more", "crafty", "adversarial", "attacks"], "offsets": [100, 101, 102, 103, 104]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [86]}}, {"event_type": "FAC", "arguments": [{"text": "multiple datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multiple", "datasets"], "offsets": [127, 128]}, {"text": "crafty adversarial attacks", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["crafty", "adversarial", "attacks"], "offsets": [132, 133, 134]}, {"text": "accuracy of offensive language classifiers", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["accuracy", "of", "offensive", "language", "classifiers"], "offsets": [138, 139, 140, 141, 142]}, {"text": "50 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["50", "%"], "offsets": [146, 147]}, {"text": "while also being able to preserve the readability and meaning of the modified text", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "also", "being", "able", "to", "preserve", "the", "readability", "and", "meaning", "of", "the", "modified", "text"], "offsets": [148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161]}, {"text": "more", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["more"], "offsets": [144]}], "trigger": {"text": "degrade", "tokens": ["degrade"], "offsets": [136]}}, {"event_type": "MDS", "arguments": [{"text": "greedy - and attention - 
based word selection", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["greedy", "-", "and", "attention", "-", "based", "word", "selection"], "offsets": [107, 108, 109, 110, 111, 112, 113, 114]}, {"text": "context - aware embeddings", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["context", "-", "aware", "embeddings"], "offsets": [116, 117, 118, 119]}, {"text": "word replacement", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["word", "replacement"], "offsets": [121, 122]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [106]}}, {"event_type": "FIN", "arguments": [{"text": "degrade", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["degrade"], "offsets": [136]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [129]}}], "document": ["social", "media", "platforms", "are", "deploying", "machine", "learning", "based", "offensive", "language", "classification", "systems", "to", "combat", "hateful", ",", "racist", ",", "and", "other", "forms", "of", "offensive", "speech", "at", "scale", ".", "however", ",", "despite", "their", "real", "-", "world", "deployment", ",", "we", "do", "not", "yet", "comprehensively", "understand", "the", "extent", "to", "which", "offensive", "language", "classifiers", "are", "robust", "against", "adversarial", "attacks", ".", "prior", "work", "in", "this", "space", "is", "limited", "to", "studying", "robustness", "of", "offensive", "language", "classifiers", "against", "primitive", "attacks", "such", "as", "misspellings", "and", "extraneous", "spaces", ".", "to", "address", "this", "gap", ",", "we", "systematically", "analyze", "the", "robustness", "of", "state", "-", "of", "-", "the", "-", "art", "offensive", "language", "classifiers", "against", "more", "crafty", "adversarial", "attacks", "that", "leverage", "greedy", "-", "and", "attention", "-", "based", "word", "selection", "and", "context", "-", "aware", "embeddings", "for", "word", 
"replacement", ".", "our", "results", "on", "multiple", "datasets", "show", "that", "these", "crafty", "adversarial", "attacks", "can", "degrade", "the", "accuracy", "of", "offensive", "language", "classifiers", "by", "more", "than", "50", "%", "while", "also", "being", "able", "to", "preserve", "the", "readability", "and", "meaning", "of", "the", "modified", "text", "."]}, {"venue": "ACL", "title": "Attentive Multiview Text Representation for Differential Diagnosis", "abstract": "We present a text representation approach that can combine different views (representations) of the same input through effective data fusion and attention strategies for ranking purposes. We apply our model to the problem of differential diagnosis, which aims to find the most probable diseases that match with clinical descriptions of patients, using data from the Undiagnosed Diseases Network. Our model outperforms several ranking approaches (including a commercially-supported system) by effectively prioritizing and combining representations obtained from traditional and recent text representation techniques. 
We elaborate on several aspects of our model and shed light on its improved performance.", "doc_id": "1a1eddba73df207481a23b5372cf4f20", "publication_year": 2021, "sentences": ["we present a text representation approach that can combine different views ( representations ) of the same input through effective data fusion and attention strategies for ranking purposes .", "we apply our model to the problem of differential diagnosis , which aims to find the most probable diseases that match with clinical descriptions of patients , using data from the undiagnosed diseases network .", "our model outperforms several ranking approaches ( including a commercially - supported system ) by effectively prioritizing and combining representations obtained from traditional and recent text representation techniques .", "we elaborate on several aspects of our model and shed light on its improved performance ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "text representation approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["text", "representation", "approach"], "offsets": [3, 4, 5]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "MDS", "arguments": [{"text": "data fusion", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["data", "fusion"], "offsets": [20, 21]}, {"text": "attention strategies", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["attention", "strategies"], "offsets": [23, 24]}, {"text": "different views ( representations ) of the same input", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["different", "views", "of", "the", "same", "input"], "offsets": [9, 10, 14, 15, 16, 17]}, {"text": "ranking", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["ranking"], "offsets": [26]}], "trigger": {"text": "combine", "tokens": ["combine"], 
"offsets": [8]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [29]}, {"text": "text representation approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["text", "representation", "approach"], "offsets": [3, 4, 5]}, {"text": "differential diagnosis", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["differential", "diagnosis"], "offsets": [37, 38]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [30]}}, {"event_type": "ITT", "arguments": [{"text": "differential diagnosis", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["differential", "diagnosis"], "offsets": [37, 38]}, {"text": "undiagnosed diseases network", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["undiagnosed", "diseases", "network"], "offsets": [60, 61, 62]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [43]}}, {"event_type": "CMP", "arguments": [{"text": "text representation approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["text", "representation", "approach"], "offsets": [3, 4, 5]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [66]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [66]}}, {"event_type": "MDS", "arguments": [{"text": "representations obtained from text representation techniques", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["different", "views", "obtained", "from", "text", "representation", "techniques"], "offsets": [9, 10, 84, 85, 89, 90, 91]}], "trigger": {"text": "prioritizing and combining", "tokens": ["prioritizing", "and", "combining"], "offsets": [80, 81, 82]}}], "document": ["we", "present", "a", "text", "representation", "approach", "that", "can", "combine", "different", "views", "(", "representations", ")", "of", "the", "same", "input", "through", "effective", "data", 
"fusion", "and", "attention", "strategies", "for", "ranking", "purposes", ".", "we", "apply", "our", "model", "to", "the", "problem", "of", "differential", "diagnosis", ",", "which", "aims", "to", "find", "the", "most", "probable", "diseases", "that", "match", "with", "clinical", "descriptions", "of", "patients", ",", "using", "data", "from", "the", "undiagnosed", "diseases", "network", ".", "our", "model", "outperforms", "several", "ranking", "approaches", "(", "including", "a", "commercially", "-", "supported", "system", ")", "by", "effectively", "prioritizing", "and", "combining", "representations", "obtained", "from", "traditional", "and", "recent", "text", "representation", "techniques", ".", "we", "elaborate", "on", "several", "aspects", "of", "our", "model", "and", "shed", "light", "on", "its", "improved", "performance", "."]}, {"venue": "ACL", "title": "A Working Memory Model for Task-oriented Dialog Response Generation", "abstract": "Recently, to incorporate external Knowledge Base (KB) information, one form of world knowledge, several end-to-end task-oriented dialog systems have been proposed. These models, however, tend to confound the dialog history with KB tuples and simply store them into one memory. Inspired by the psychological studies on working memory, we propose a working memory model (WMM2Seq) for dialog response generation. Our WMM2Seq adopts a working memory to interact with two separated long-term memories, which are the episodic memory for memorizing dialog history and the semantic memory for storing KB tuples. The working memory consists of a central executive to attend to the aforementioned memories, and a short-term storage system to store the \u201cactivated\u201d contents from the long-term memories. Furthermore, we introduce a context-sensitive perceptual process for the token representations of dialog history, and then feed them into the episodic memory. 
Extensive experiments on two task-oriented dialog datasets demonstrate that our WMM2Seq significantly outperforms the state-of-the-art results in several evaluation metrics.", "doc_id": "03c2e0cf5e85a48cbc10cca7af13714d", "publication_year": 2019, "sentences": ["recently , to incorporate external knowledge base ( kb ) information , one form of world knowledge , several end - to - end task - oriented dialog systems have been proposed .", "these models , however , tend to confound the dialog history with kb tuples and simply store them into one memory .", "inspired by the psychological studies on working memory , we propose a working memory model ( wmm2seq ) for dialog response generation .", "our wmm2seq adopts a working memory to interact with two separated long - term memories , which are the episodic memory for memorizing dialog history and the semantic memory for storing kb tuples .", "the working memory consists of a central executive to attend to the aforementioned memories , and a short - term storage system to store the \u201c activated \u201d contents from the long - term memories .", "furthermore , we introduce a context - sensitive perceptual process for the token representations of dialog history , and then feed them into the episodic memory .", "extensive experiments on two task - oriented dialog datasets demonstrate that our wmm2seq significantly outperforms the state - of - the - art results in several evaluation metrics ."], "events": [{"event_type": "ITT", "arguments": [{"text": "external knowledge base ( kb ) information", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["external", "knowledge", "base", "information"], "offsets": [4, 5, 6, 10]}], "trigger": {"text": "incorporate", "tokens": ["incorporate"], "offsets": [3]}}, {"event_type": "RWS", "arguments": [{"text": "kb tuples", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["kb", "tuples"], "offsets": [45, 46]}, {"text": "dialog history", "nugget_type": "FEA", 
"argument_type": "BaseComponent", "tokens": ["dialog", "history"], "offsets": [42, 43]}], "trigger": {"text": "confound", "tokens": ["confound"], "offsets": [40]}}, {"event_type": "RWS", "arguments": [{"text": "one memory", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["one", "memory"], "offsets": [52, 53]}, {"text": "dialog history", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["dialog", "history"], "offsets": [42, 43]}, {"text": "kb tuples", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["kb", "tuples"], "offsets": [45, 46]}], "trigger": {"text": "simply store", "tokens": ["simply", "store"], "offsets": [48, 49]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [64]}, {"text": "working memory model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["working", "memory", "model"], "offsets": [67, 68, 69]}, {"text": "dialog response generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dialog", "response", "generation"], "offsets": [74, 75, 76]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [65]}}, {"event_type": "MDS", "arguments": [{"text": "episodic memory for memorizing dialog history", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["episodic", "memory", "for", "memorizing", "dialog", "history"], "offsets": [97, 98, 99, 100, 101, 102]}, {"text": "semantic memory for storing kb tuples", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["semantic", "memory", "for", "storing", "kb", "tuples"], "offsets": [105, 106, 107, 108, 109, 110]}, {"text": "working memory", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["working", "memory"], "offsets": [82, 83]}], "trigger": {"text": "interact", "tokens": ["interact"], "offsets": [85]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": 
"Proposer", "tokens": ["we"], "offsets": [150]}, {"text": "context - sensitive perceptual process", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["context", "-", "sensitive", "perceptual", "process"], "offsets": [153, 154, 155, 156, 157]}, {"text": "token representations of dialog history", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["token", "representations", "of", "dialog", "history"], "offsets": [160, 161, 162, 163, 164]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [151]}}, {"event_type": "MDS", "arguments": [{"text": "episodic memory", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["episodic", "memory"], "offsets": [172, 173]}, {"text": "token representations of dialog history", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["token", "representations", "of", "dialog", "history"], "offsets": [160, 161, 162, 163, 164]}], "trigger": {"text": "feed", "tokens": ["feed"], "offsets": [168]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [189]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [184]}}, {"event_type": "CMP", "arguments": [{"text": "working memory model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["working", "memory", "model"], "offsets": [67, 68, 69]}, {"text": "two task - oriented dialog datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "task", "-", "oriented", "dialog", "datasets"], "offsets": [178, 179, 180, 181, 182, 183]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [188]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [189]}, {"text": "state - of - the - art results", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", 
"-", "the", "-", "art", "results"], "offsets": [191, 192, 193, 194, 195, 196, 197, 198]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [189]}}], "document": ["recently", ",", "to", "incorporate", "external", "knowledge", "base", "(", "kb", ")", "information", ",", "one", "form", "of", "world", "knowledge", ",", "several", "end", "-", "to", "-", "end", "task", "-", "oriented", "dialog", "systems", "have", "been", "proposed", ".", "these", "models", ",", "however", ",", "tend", "to", "confound", "the", "dialog", "history", "with", "kb", "tuples", "and", "simply", "store", "them", "into", "one", "memory", ".", "inspired", "by", "the", "psychological", "studies", "on", "working", "memory", ",", "we", "propose", "a", "working", "memory", "model", "(", "wmm2seq", ")", "for", "dialog", "response", "generation", ".", "our", "wmm2seq", "adopts", "a", "working", "memory", "to", "interact", "with", "two", "separated", "long", "-", "term", "memories", ",", "which", "are", "the", "episodic", "memory", "for", "memorizing", "dialog", "history", "and", "the", "semantic", "memory", "for", "storing", "kb", "tuples", ".", "the", "working", "memory", "consists", "of", "a", "central", "executive", "to", "attend", "to", "the", "aforementioned", "memories", ",", "and", "a", "short", "-", "term", "storage", "system", "to", "store", "the", "\u201c", "activated", "\u201d", "contents", "from", "the", "long", "-", "term", "memories", ".", "furthermore", ",", "we", "introduce", "a", "context", "-", "sensitive", "perceptual", "process", "for", "the", "token", "representations", "of", "dialog", "history", ",", "and", "then", "feed", "them", "into", "the", "episodic", "memory", ".", "extensive", "experiments", "on", "two", "task", "-", "oriented", "dialog", "datasets", "demonstrate", "that", "our", "wmm2seq", "significantly", "outperforms", "the", "state", "-", "of", "-", "the", "-", "art", "results", "in", "several", "evaluation", "metrics", "."]}, {"venue": "ACL", 
"title": "AraT5: Text-to-Text Transformers for Arabic Language Generation", "abstract": "Transfer learning with a unified Transformer framework (T5) that converts all language problems into a text-to-text format was recently proposed as a simple and effective transfer learning approach. Although a multilingual version of the T5 model (mT5) was also introduced, it is not clear how well it can fare on non-English tasks involving diverse data. To investigate this question, we apply mT5 on a language with a wide variety of dialects\u2013Arabic. For evaluation, we introduce a novel benchmark for ARabic language GENeration (ARGEN), covering seven important tasks. For model comparison, we pre-train three powerful Arabic T5-style models and evaluate them on ARGEN. Although pre-trained with ~49 less data, our new models perform significantly better than mT5 on all ARGEN tasks (in 52 out of 59 test sets) and set several new SOTAs. Our models also establish new SOTA on the recently-proposed, large Arabic language understanding evaluation benchmark ARLUE (Abdul-Mageed et al., 2021). Our new models are publicly available. 
We also link to ARGEN datasets through our repository: https://github.com/UBC-NLP/araT5.", "doc_id": "1fb0248f357d7813ffe002ccb90c9594", "publication_year": 2022, "sentences": ["transfer learning with a unified transformer framework ( t5 ) that converts all language problems into a text - to - text format was recently proposed as a simple and effective transfer learning approach .", "although a multilingual version of the t5 model ( mt5 ) was also introduced , it is not clear how well it can fare on non - english tasks involving diverse data .", "to investigate this question , we apply mt5 on a language with a wide variety of dialects \u2013 arabic .", "for evaluation , we introduce a novel benchmark for arabic language generation ( argen ) , covering seven important tasks .", "for model comparison , we pre - train three powerful arabic t5 - style models and evaluate them on argen .", "although pre - trained with ~ 49 less data , our new models perform significantly better than mt5 on all argen tasks ( in 52 out of 59 test sets ) and set several new sotas .", "our models also establish new sota on the recently - proposed , large arabic language understanding evaluation benchmark arlue ( abdul - mageed et al . , 2021 ) .", "our new models are publicly available .", "we also link to argen datasets through our repository : https : / / github . 
com / ubc - nlp / arat5 ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [91]}, {"text": "evaluation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["evaluation"], "offsets": [89]}, {"text": "benchmark for arabic language generation", "nugget_type": "APP", "argument_type": "Content", "tokens": ["benchmark", "for", "arabic", "language", "generation"], "offsets": [95, 96, 97, 98, 99]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [92]}}, {"event_type": "MDS", "arguments": [{"text": "model comparison", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["model", "comparison"], "offsets": [110, 111]}, {"text": "three powerful arabic t5 - style models", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["three", "powerful", "arabic", "t5", "-", "style", "models"], "offsets": [117, 118, 119, 120, 121, 122, 123]}], "trigger": {"text": "pre - train", "tokens": ["pre", "-", "train"], "offsets": [114, 115, 116]}}, {"event_type": "CMP", "arguments": [{"text": "on all argen tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "all", "argen", "tasks"], "offsets": [148, 149, 150, 151]}, {"text": "multilingual version of the t5 model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["multilingual", "version", "of", "the", "t5", "model"], "offsets": [37, 38, 39, 40, 41, 42]}, {"text": "significantly better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "better"], "offsets": [144, 145]}, {"text": "arabic t5 - style models", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["arabic", "t5", "-", "style", "models"], "offsets": [119, 120, 121, 122, 123]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [143]}}, {"event_type": "FAC", "arguments": [{"text": "on the recently - proposed , large arabic language understanding evaluation 
benchmark arlue", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "recently", "-", "proposed", ",", "large", "arabic", "language", "understanding", "evaluation", "benchmark", "arlue"], "offsets": [173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185]}, {"text": "new sota", "nugget_type": "STR", "argument_type": "Object", "tokens": ["new", "sota"], "offsets": [171, 172]}, {"text": "arabic t5 - style models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["arabic", "t5", "-", "style", "models"], "offsets": [119, 120, 121, 122, 123]}], "trigger": {"text": "establish", "tokens": ["establish"], "offsets": [170]}}, {"event_type": "ITT", "arguments": [{"text": "transfer learning with a unified transformer framework", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["transfer", "learning", "with", "a", "unified", "transformer", "framework"], "offsets": [0, 1, 2, 3, 4, 5, 6]}], "trigger": {"text": "converts", "tokens": ["converts"], "offsets": [11]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [73]}, {"text": "multilingual version of the t5 model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multilingual", "version", "of", "the", "t5", "model"], "offsets": [37, 38, 39, 40, 41, 42]}, {"text": "on a language with a wide variety of dialects", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "language", "with", "a", "wide", "variety", "of", "dialects"], "offsets": [76, 77, 78, 79, 80, 81, 82, 83, 84]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [74]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [113]}, {"text": "three powerful arabic t5 - style models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["three", "powerful", "arabic", "t5", "-", "style", "models"], 
"offsets": [117, 118, 119, 120, 121, 122, 123]}, {"text": "argen", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["arabic", "language", "generation"], "offsets": [97, 98, 99]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [125]}}, {"event_type": "FAC", "arguments": [{"text": "new sotas", "nugget_type": "STR", "argument_type": "Object", "tokens": ["new", "sotas"], "offsets": [164, 165]}, {"text": "arabic t5 - style models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["arabic", "t5", "-", "style", "models"], "offsets": [119, 120, 121, 122, 123]}], "trigger": {"text": "set", "tokens": ["set"], "offsets": [162]}}], "document": ["transfer", "learning", "with", "a", "unified", "transformer", "framework", "(", "t5", ")", "that", "converts", "all", "language", "problems", "into", "a", "text", "-", "to", "-", "text", "format", "was", "recently", "proposed", "as", "a", "simple", "and", "effective", "transfer", "learning", "approach", ".", "although", "a", "multilingual", "version", "of", "the", "t5", "model", "(", "mt5", ")", "was", "also", "introduced", ",", "it", "is", "not", "clear", "how", "well", "it", "can", "fare", "on", "non", "-", "english", "tasks", "involving", "diverse", "data", ".", "to", "investigate", "this", "question", ",", "we", "apply", "mt5", "on", "a", "language", "with", "a", "wide", "variety", "of", "dialects", "\u2013", "arabic", ".", "for", "evaluation", ",", "we", "introduce", "a", "novel", "benchmark", "for", "arabic", "language", "generation", "(", "argen", ")", ",", "covering", "seven", "important", "tasks", ".", "for", "model", "comparison", ",", "we", "pre", "-", "train", "three", "powerful", "arabic", "t5", "-", "style", "models", "and", "evaluate", "them", "on", "argen", ".", "although", "pre", "-", "trained", "with", "~", "49", "less", "data", ",", "our", "new", "models", "perform", "significantly", "better", "than", "mt5", "on", "all", "argen", "tasks", "(", "in", "52", "out", "of", 
"59", "test", "sets", ")", "and", "set", "several", "new", "sotas", ".", "our", "models", "also", "establish", "new", "sota", "on", "the", "recently", "-", "proposed", ",", "large", "arabic", "language", "understanding", "evaluation", "benchmark", "arlue", "(", "abdul", "-", "mageed", "et", "al", ".", ",", "2021", ")", ".", "our", "new", "models", "are", "publicly", "available", ".", "we", "also", "link", "to", "argen", "datasets", "through", "our", "repository", ":", "https", ":", "/", "/", "github", ".", "com", "/", "ubc", "-", "nlp", "/", "arat5", "."]}, {"venue": "ACL", "title": "Sequence-to-sequence AMR Parsing with Ancestor Information", "abstract": "AMR parsing is the task that maps a sentence to an AMR semantic graph automatically. The difficulty comes from generating the complex graph structure. The previous state-of-the-art method translates the AMR graph into a sequence, then directly fine-tunes a pretrained sequence-to-sequence Transformer model (BART). However, purely treating the graph as a sequence does not take advantage of structural information about the graph. In this paper, we design several strategies to add the important ancestor information into the Transformer Decoder. 
Our experiments show that we can improve the performance for both AMR 2.0 and AMR 3.0 dataset and achieve new state-of-the-art results.", "doc_id": "b8182fe14aabec18def6089d8df498c7", "publication_year": 2022, "sentences": ["amr parsing is the task that maps a sentence to an amr semantic graph automatically .", "the difficulty comes from generating the complex graph structure .", "the previous state - of - the - art method translates the amr graph into a sequence , then directly fine - tunes a pretrained sequence - to - sequence transformer model ( bart ) .", "however , purely treating the graph as a sequence does not take advantage of structural information about the graph .", "in this paper , we design several strategies to add the important ancestor information into the transformer decoder .", "our experiments show that we can improve the performance for both amr 2 . 0 and amr 3 . 0 dataset and achieve new state - of - the - art results ."], "events": [{"event_type": "ITT", "arguments": [{"text": "amr parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["amr", "parsing"], "offsets": [0, 1]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [4]}}, {"event_type": "RWS", "arguments": [{"text": "amr graph", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["amr", "graph"], "offsets": [38, 39]}, {"text": "previous state - of - the - art method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art", "method"], "offsets": [27, 28, 29, 30, 31, 32, 33, 34, 35]}, {"text": "sequence", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["sequence"], "offsets": [42]}], "trigger": {"text": "translates", "tokens": ["translates"], "offsets": [36]}}, {"event_type": "RWS", "arguments": [{"text": "directly", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["directly"], "offsets": [45]}, {"text": "pretrained sequence - to - sequence transformer model", 
"nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pretrained", "sequence", "-", "to", "-", "sequence", "transformer", "model"], "offsets": [50, 51, 52, 53, 54, 55, 56, 57]}, {"text": "previous state - of - the - art method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art", "method"], "offsets": [27, 28, 29, 30, 31, 32, 33, 34, 35]}], "trigger": {"text": "fine - tunes", "tokens": ["fine", "-", "tunes"], "offsets": [46, 47, 48]}}, {"event_type": "RWF", "arguments": [{"text": "structural information about the graph", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["structural", "information", "about", "the", "graph"], "offsets": [76, 77, 78, 79, 80]}], "trigger": {"text": "not take", "tokens": ["not", "take"], "offsets": [72, 73]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [86]}, {"text": "strategies to add the important ancestor information", "nugget_type": "APP", "argument_type": "Content", "tokens": ["strategies", "to", "add", "the", "important", "ancestor", "information"], "offsets": [89, 90, 91, 92, 93, 94, 95]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [87]}}, {"event_type": "MDS", "arguments": [{"text": "important ancestor information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["important", "ancestor", "information"], "offsets": [93, 94, 95]}, {"text": "transformer decoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["transformer", "decoder"], "offsets": [98, 99]}], "trigger": {"text": "add", "tokens": ["add"], "offsets": [91]}}, {"event_type": "FAC", "arguments": [{"text": "performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance"], "offsets": [109]}, {"text": "amr 2 . 
0", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["amr", "2", ".", "0"], "offsets": [112, 113, 114, 115]}, {"text": "amr 3 . 0 dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["amr", "3", ".", "0", "dataset"], "offsets": [117, 118, 119, 120, 121]}, {"text": "strategies to add the important ancestor information", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["strategies", "to", "add", "the", "important", "ancestor", "information"], "offsets": [89, 90, 91, 92, 93, 94, 95]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [107]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [125, 126, 127, 128, 129, 130, 131, 132]}, {"text": "strategies to add the important ancestor information", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["strategies", "to", "add", "the", "important", "ancestor", "information"], "offsets": [89, 90, 91, 92, 93, 94, 95]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [123]}}], "document": ["amr", "parsing", "is", "the", "task", "that", "maps", "a", "sentence", "to", "an", "amr", "semantic", "graph", "automatically", ".", "the", "difficulty", "comes", "from", "generating", "the", "complex", "graph", "structure", ".", "the", "previous", "state", "-", "of", "-", "the", "-", "art", "method", "translates", "the", "amr", "graph", "into", "a", "sequence", ",", "then", "directly", "fine", "-", "tunes", "a", "pretrained", "sequence", "-", "to", "-", "sequence", "transformer", "model", "(", "bart", ")", ".", "however", ",", "purely", "treating", "the", "graph", "as", "a", "sequence", "does", "not", "take", "advantage", "of", "structural", "information", "about", "the", "graph", ".", "in", "this", "paper", ",", "we", "design", "several", "strategies", "to", "add", "the", "important", "ancestor", 
"information", "into", "the", "transformer", "decoder", ".", "our", "experiments", "show", "that", "we", "can", "improve", "the", "performance", "for", "both", "amr", "2", ".", "0", "and", "amr", "3", ".", "0", "dataset", "and", "achieve", "new", "state", "-", "of", "-", "the", "-", "art", "results", "."]}, {"venue": "ACL", "title": "Knowledge Distillation for Multilingual Unsupervised Neural Machine Translation", "abstract": "Unsupervised neural machine translation (UNMT) has recently achieved remarkable results for several language pairs. However, it can only translate between a single language pair and cannot produce translation results for multiple language pairs at the same time. That is, research on multilingual UNMT has been limited. In this paper, we empirically introduce a simple method to translate between thirteen languages using a single encoder and a single decoder, making use of multilingual data to improve UNMT for all language pairs. On the basis of the empirical findings, we propose two knowledge distillation methods to further enhance multilingual UNMT performance. 
Our experiments on a dataset with English translated to and from twelve other languages (including three language families and six language branches) show remarkable results, surpassing strong unsupervised individual baselines while achieving promising performance between non-English language pairs in zero-shot translation scenarios and alleviating poor performance in low-resource language pairs.", "doc_id": "d264d69fa4eacfb649deed357152f363", "publication_year": 2020, "sentences": ["unsupervised neural machine translation ( unmt ) has recently achieved remarkable results for several language pairs .", "however , it can only translate between a single language pair and cannot produce translation results for multiple language pairs at the same time .", "that is , research on multilingual unmt has been limited .", "in this paper , we empirically introduce a simple method to translate between thirteen languages using a single encoder and a single decoder , making use of multilingual data to improve unmt for all language pairs .", "on the basis of the empirical findings , we propose two knowledge distillation methods to further enhance multilingual unmt performance .", "our experiments on a dataset with english translated to and from twelve other languages ( including three language families and six language branches ) show remarkable results , surpassing strong unsupervised individual baselines while achieving promising performance between non - english language pairs in zero - shot translation scenarios and alleviating poor performance in low - resource language pairs ."], "events": [{"event_type": "ITT", "arguments": [{"text": "unsupervised neural machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["unmt"], "offsets": [84]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [9]}}, {"event_type": "RWF", "arguments": [{"text": "multiple language pairs", "nugget_type": "TAK", "argument_type": "Target", "tokens": 
["multiple", "language", "pairs"], "offsets": [34, 35, 36]}, {"text": "at the same time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "the", "same", "time"], "offsets": [37, 38, 39, 40]}, {"text": "cannot produce", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cannot", "produce"], "offsets": [29, 30]}, {"text": "unsupervised neural machine translation", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["unmt"], "offsets": [84]}], "trigger": {"text": "cannot produce", "tokens": ["cannot", "produce"], "offsets": [29, 30]}}, {"event_type": "MDS", "arguments": [{"text": "single encoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["single", "encoder"], "offsets": [70, 71]}, {"text": "single decoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["single", "decoder"], "offsets": [74, 75]}, {"text": "translate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["translate"], "offsets": [64]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [68]}}, {"event_type": "MDS", "arguments": [{"text": "multilingual data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["multilingual", "data"], "offsets": [80, 81]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [83]}, {"text": "all language pairs", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["all", "language", "pairs"], "offsets": [86, 87, 88]}], "trigger": {"text": "making use of", "tokens": ["making", "use", "of"], "offsets": [77, 78, 79]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [98]}, {"text": "two knowledge distillation methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "knowledge", "distillation", "methods"], "offsets": [100, 101, 102, 103]}, {"text": "enhance", "nugget_type": "E-PUR", 
"argument_type": "Target", "tokens": ["enhance"], "offsets": [106]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [99]}}, {"event_type": "PUR", "arguments": [{"text": "multilingual unmt performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["multilingual", "unmt", "performance"], "offsets": [107, 108, 109]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [106]}}, {"event_type": "CMP", "arguments": [{"text": "strong unsupervised individual baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "unsupervised", "individual", "baselines"], "offsets": [140, 141, 142, 143]}], "trigger": {"text": "surpassing", "tokens": ["surpassing"], "offsets": [139]}}, {"event_type": "FAC", "arguments": [{"text": "promising performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["promising", "performance"], "offsets": [146, 147]}, {"text": "between non - english language pairs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "non", "-", "english", "language", "pairs"], "offsets": [148, 149, 150, 151, 152, 153]}, {"text": "zero - shot translation scenarios", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["zero", "-", "shot", "translation", "scenarios"], "offsets": [155, 156, 157, 158, 159]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [145]}}, {"event_type": "FAC", "arguments": [{"text": "poor performance", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["poor", "performance"], "offsets": [162, 163]}, {"text": "low - resource language pairs", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["low", "-", "resource", "language", "pairs"], "offsets": [165, 166, 167, 168, 169]}], "trigger": {"text": "alleviating", "tokens": ["alleviating"], "offsets": [161]}}, {"event_type": "RWF", "arguments": [{"text": "unsupervised neural machine translation", "nugget_type": "APP", "argument_type": "Concern", "tokens": 
["unmt"], "offsets": [84]}, {"text": "only", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["only"], "offsets": [21]}, {"text": "between a single language pair", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "a", "single", "language", "pair"], "offsets": [23, 24, 25, 26, 27]}], "trigger": {"text": "translate", "tokens": ["translate"], "offsets": [22]}}, {"event_type": "PUR", "arguments": [{"text": "between thirteen languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "thirteen", "languages"], "offsets": [65, 66, 67]}], "trigger": {"text": "translate", "tokens": ["translate"], "offsets": [64]}}, {"event_type": "PUR", "arguments": [{"text": "unmt", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["unmt"], "offsets": [84]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [83]}}], "document": ["unsupervised", "neural", "machine", "translation", "(", "unmt", ")", "has", "recently", "achieved", "remarkable", "results", "for", "several", "language", "pairs", ".", "however", ",", "it", "can", "only", "translate", "between", "a", "single", "language", "pair", "and", "cannot", "produce", "translation", "results", "for", "multiple", "language", "pairs", "at", "the", "same", "time", ".", "that", "is", ",", "research", "on", "multilingual", "unmt", "has", "been", "limited", ".", "in", "this", "paper", ",", "we", "empirically", "introduce", "a", "simple", "method", "to", "translate", "between", "thirteen", "languages", "using", "a", "single", "encoder", "and", "a", "single", "decoder", ",", "making", "use", "of", "multilingual", "data", "to", "improve", "unmt", "for", "all", "language", "pairs", ".", "on", "the", "basis", "of", "the", "empirical", "findings", ",", "we", "propose", "two", "knowledge", "distillation", "methods", "to", "further", "enhance", "multilingual", "unmt", "performance", ".", "our", "experiments", "on", "a", "dataset", "with", "english", "translated", "to", 
"and", "from", "twelve", "other", "languages", "(", "including", "three", "language", "families", "and", "six", "language", "branches", ")", "show", "remarkable", "results", ",", "surpassing", "strong", "unsupervised", "individual", "baselines", "while", "achieving", "promising", "performance", "between", "non", "-", "english", "language", "pairs", "in", "zero", "-", "shot", "translation", "scenarios", "and", "alleviating", "poor", "performance", "in", "low", "-", "resource", "language", "pairs", "."]}, {"venue": "ACL", "title": "Women\u2019s Syntactic Resilience and Men\u2019s Grammatical Luck: Gender-Bias in Part-of-Speech Tagging and Dependency Parsing", "abstract": "Several linguistic studies have shown the prevalence of various lexical and grammatical patterns in texts authored by a person of a particular gender, but models for part-of-speech tagging and dependency parsing have still not adapted to account for these differences. To address this, we annotate the Wall Street Journal part of the Penn Treebank with the gender information of the articles\u2019 authors, and build taggers and parsers trained on this data that show performance differences in text written by men and women. Further analyses reveal numerous part-of-speech tags and syntactic relations whose prediction performances benefit from the prevalence of a specific gender in the training data. The results underscore the importance of accounting for gendered differences in syntactic tasks, and outline future venues for developing more accurate taggers and parsers. 
We release our data to the research community.", "doc_id": "e40e3aa94cdf002e135ac3558cbcb34b", "publication_year": 2019, "sentences": ["several linguistic studies have shown the prevalence of various lexical and grammatical patterns in texts authored by a person of a particular gender , but models for part - of - speech tagging and dependency parsing have still not adapted to account for these differences .", "to address this , we annotate the wall street journal part of the penn treebank with the gender information of the articles \u2019 authors , and build taggers and parsers trained on this data that show performance differences in text written by men and women .", "further analyses reveal numerous part - of - speech tags and syntactic relations whose prediction performances benefit from the prevalence of a specific gender in the training data .", "the results underscore the importance of accounting for gendered differences in syntactic tasks , and outline future venues for developing more accurate taggers and parsers .", "we release our data to the research community ."], "events": [{"event_type": "ITT", "arguments": [{"text": "prevalence of various lexical", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["prevalence", "of", "various", "lexical"], "offsets": [6, 7, 8, 9]}, {"text": "prevalence of grammatical patterns", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["prevalence", "of", "grammatical", "patterns"], "offsets": [6, 7, 11, 12]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "differences", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["differences"], "offsets": [44]}, {"text": "not adapted to account", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "adapted", "to", "account"], "offsets": [38, 39, 40, 41]}, {"text": "models for part - of - speech tagging", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["models", 
"for", "part", "-", "of", "-", "speech", "tagging"], "offsets": [25, 26, 27, 28, 29, 30, 31, 32]}], "trigger": {"text": "not adapted to account", "tokens": ["not", "adapted", "to", "account"], "offsets": [38, 39, 40, 41]}}, {"event_type": "MDS", "arguments": [{"text": "gender information of the articles \u2019 authors", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["gender", "information", "of", "the", "articles", "\u2019", "authors"], "offsets": [63, 64, 65, 66, 67, 68, 69]}, {"text": "wall street journal part of the penn treebank", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["wall", "street", "journal", "part", "of", "the", "penn", "treebank"], "offsets": [53, 54, 55, 56, 57, 58, 59, 60]}], "trigger": {"text": "annotate", "tokens": ["annotate"], "offsets": [51]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [50]}, {"text": "show", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["show"], "offsets": [81]}, {"text": "taggers trained on this data", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["taggers", "trained", "on", "this", "data"], "offsets": [73, 76, 77, 78, 79]}, {"text": "parsers trained on this data", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["parsers", "trained", "on", "this", "data"], "offsets": [75, 76, 77, 78, 79]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [72]}}, {"event_type": "FAC", "arguments": [{"text": "prediction performances", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["prediction", "performances"], "offsets": [106, 107]}, {"text": "prevalence of a specific gender", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["prevalence", "of", "a", "specific", "gender"], "offsets": [111, 112, 113, 114, 115]}], "trigger": {"text": "benefit", "tokens": ["benefit"], "offsets": [108]}}, {"event_type": "FAC", "arguments": [{"text": 
"importance of accounting for gendered differences", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["importance", "of", "accounting", "for", "gendered", "differences"], "offsets": [125, 126, 127, 128, 129, 130]}, {"text": "in syntactic tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "syntactic", "tasks"], "offsets": [131, 132, 133]}], "trigger": {"text": "underscore", "tokens": ["underscore"], "offsets": [123]}}, {"event_type": "FAC", "arguments": [{"text": "future venues", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["future", "venues"], "offsets": [137, 138]}, {"text": "developing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["developing"], "offsets": [140]}], "trigger": {"text": "outline", "tokens": ["outline"], "offsets": [136]}}, {"event_type": "PUR", "arguments": [{"text": "more accurate taggers", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["more", "accurate", "taggers"], "offsets": [141, 142, 143]}, {"text": "more accurate parsers", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["more", "accurate", "parsers"], "offsets": [141, 142, 145]}], "trigger": {"text": "developing", "tokens": ["developing"], "offsets": [140]}}, {"event_type": "PUR", "arguments": [{"text": "performance differences", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance", "differences"], "offsets": [82, 83]}, {"text": "in text written by men and women", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "text", "written", "by", "men", "and", "women"], "offsets": [84, 85, 86, 87, 88, 89, 90]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [81]}}, {"event_type": "FAC", "arguments": [{"text": "part - of - speech tags", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["part", "-", "of", "-", "speech", "tags"], "offsets": [96, 97, 98, 99, 100, 101]}, {"text": "syntactic relations", "nugget_type": "FEA", "argument_type": "Subject", 
"tokens": ["syntactic", "relations"], "offsets": [103, 104]}], "trigger": {"text": "reveal", "tokens": ["reveal"], "offsets": [94]}}], "document": ["several", "linguistic", "studies", "have", "shown", "the", "prevalence", "of", "various", "lexical", "and", "grammatical", "patterns", "in", "texts", "authored", "by", "a", "person", "of", "a", "particular", "gender", ",", "but", "models", "for", "part", "-", "of", "-", "speech", "tagging", "and", "dependency", "parsing", "have", "still", "not", "adapted", "to", "account", "for", "these", "differences", ".", "to", "address", "this", ",", "we", "annotate", "the", "wall", "street", "journal", "part", "of", "the", "penn", "treebank", "with", "the", "gender", "information", "of", "the", "articles", "\u2019", "authors", ",", "and", "build", "taggers", "and", "parsers", "trained", "on", "this", "data", "that", "show", "performance", "differences", "in", "text", "written", "by", "men", "and", "women", ".", "further", "analyses", "reveal", "numerous", "part", "-", "of", "-", "speech", "tags", "and", "syntactic", "relations", "whose", "prediction", "performances", "benefit", "from", "the", "prevalence", "of", "a", "specific", "gender", "in", "the", "training", "data", ".", "the", "results", "underscore", "the", "importance", "of", "accounting", "for", "gendered", "differences", "in", "syntactic", "tasks", ",", "and", "outline", "future", "venues", "for", "developing", "more", "accurate", "taggers", "and", "parsers", ".", "we", "release", "our", "data", "to", "the", "research", "community", "."]}, {"venue": "ACL", "title": "Modelling Suspense in Short Stories as Uncertainty Reduction over Neural Representation", "abstract": "Suspense is a crucial ingredient of narrative fiction, engaging readers and making stories compelling. While there is a vast theoretical literature on suspense, it is computationally not well understood. 
We compare two ways for modelling suspense: surprise, a backward-looking measure of how unexpected the current state is given the story so far; and uncertainty reduction, a forward-looking measure of how unexpected the continuation of the story is. Both can be computed either directly over story representations or over their probability distributions. We propose a hierarchical language model that encodes stories and computes surprise and uncertainty reduction. Evaluating against short stories annotated with human suspense judgements, we find that uncertainty reduction over representations is the best predictor, resulting in near human accuracy. We also show that uncertainty reduction can be used to predict suspenseful events in movie synopses.", "doc_id": "58be16ce129d8913f1906048cbda67bc", "publication_year": 2020, "sentences": ["suspense is a crucial ingredient of narrative fiction , engaging readers and making stories compelling .", "while there is a vast theoretical literature on suspense , it is computationally not well understood .", "we compare two ways for modelling suspense : surprise , a backward - looking measure of how unexpected the current state is given the story so far ; and uncertainty reduction , a forward - looking measure of how unexpected the continuation of the story is .", "both can be computed either directly over story representations or over their probability distributions .", "we propose a hierarchical language model that encodes stories and computes surprise and uncertainty reduction .", "evaluating against short stories annotated with human suspense judgements , we find that uncertainty reduction over representations is the best predictor , resulting in near human accuracy .", "we also show that uncertainty reduction can be used to predict suspenseful events in movie synopses ."], "events": [{"event_type": "ITT", "arguments": [{"text": "suspense", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["suspense"], "offsets": [0]}], 
"trigger": {"text": "ingredient", "tokens": ["ingredient"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "suspense", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["suspense"], "offsets": [24]}], "trigger": {"text": "computationally not well understood", "tokens": ["computationally", "not", "well", "understood"], "offsets": [28, 29, 30, 31]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [33]}, {"text": "two ways", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "ways"], "offsets": [35, 36]}, {"text": "modelling", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["modelling"], "offsets": [38]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [34]}}, {"event_type": "PUR", "arguments": [{"text": "suspense", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["suspense"], "offsets": [39]}], "trigger": {"text": "modelling", "tokens": ["modelling"], "offsets": [38]}}, {"event_type": "WKS", "arguments": [{"text": "surprise", "nugget_type": "APP", "argument_type": "Content", "tokens": ["surprise"], "offsets": [41]}, {"text": "uncertainty reduction", "nugget_type": "APP", "argument_type": "Content", "tokens": ["uncertainty", "reduction"], "offsets": [62, 63]}, {"text": "over story representations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "story", "representations"], "offsets": [86, 87, 88]}, {"text": "over their probability distributions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "their", "probability", "distributions"], "offsets": [90, 91, 92, 93]}], "trigger": {"text": "computed", "tokens": ["computed"], "offsets": [83]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [95]}, {"text": "hierarchical language model", "nugget_type": "APP", "argument_type": 
"Content", "tokens": ["hierarchical", "language", "model"], "offsets": [98, 99, 100]}, {"text": "encodes", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encodes"], "offsets": [102]}, {"text": "computes", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["computes"], "offsets": [105]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [96]}}, {"event_type": "PUR", "arguments": [{"text": "stories", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["stories"], "offsets": [103]}], "trigger": {"text": "encodes", "tokens": ["encodes"], "offsets": [102]}}, {"event_type": "PUR", "arguments": [{"text": "surprise", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["surprise"], "offsets": [106]}, {"text": "uncertainty reduction", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["uncertainty", "reduction"], "offsets": [108, 109]}], "trigger": {"text": "computes", "tokens": ["computes"], "offsets": [105]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [121]}, {"text": "with human suspense judgements", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "human", "suspense", "judgements"], "offsets": [116, 117, 118, 119]}, {"text": "short stories", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["short", "stories"], "offsets": [113, 114]}], "trigger": {"text": "evaluating", "tokens": ["evaluating"], "offsets": [111]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [121]}, {"text": "best predictor", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["best", "predictor"], "offsets": [130, 131]}, {"text": "resulting in", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["resulting", "in"], "offsets": [133, 134]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [122]}}, {"event_type": 
"FAC", "arguments": [{"text": "uncertainty reduction over representations", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["uncertainty", "reduction", "over", "representations"], "offsets": [124, 125, 126, 127]}], "trigger": {"text": "best predictor", "tokens": ["best", "predictor"], "offsets": [130, 131]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [139]}, {"text": "used", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["used"], "offsets": [147]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [141]}}, {"event_type": "FAC", "arguments": [{"text": "uncertainty reduction", "nugget_type": "APP", "argument_type": "Object", "tokens": ["uncertainty", "reduction"], "offsets": [143, 144]}, {"text": "in movie synopses", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "movie", "synopses"], "offsets": [152, 153, 154]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [149]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [147]}}, {"event_type": "PUR", "arguments": [{"text": "suspenseful events", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["suspenseful", "events"], "offsets": [150, 151]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [149]}}, {"event_type": "FAC", "arguments": [{"text": "near human", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["near", "human"], "offsets": [135, 136]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["accuracy"], "offsets": [137]}, {"text": "uncertainty reduction", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["uncertainty", "reduction"], "offsets": [124, 125]}], "trigger": {"text": "resulting in", "tokens": ["resulting", "in"], "offsets": [133, 134]}}], "document": ["suspense", "is", "a", "crucial", "ingredient", "of", "narrative", 
"fiction", ",", "engaging", "readers", "and", "making", "stories", "compelling", ".", "while", "there", "is", "a", "vast", "theoretical", "literature", "on", "suspense", ",", "it", "is", "computationally", "not", "well", "understood", ".", "we", "compare", "two", "ways", "for", "modelling", "suspense", ":", "surprise", ",", "a", "backward", "-", "looking", "measure", "of", "how", "unexpected", "the", "current", "state", "is", "given", "the", "story", "so", "far", ";", "and", "uncertainty", "reduction", ",", "a", "forward", "-", "looking", "measure", "of", "how", "unexpected", "the", "continuation", "of", "the", "story", "is", ".", "both", "can", "be", "computed", "either", "directly", "over", "story", "representations", "or", "over", "their", "probability", "distributions", ".", "we", "propose", "a", "hierarchical", "language", "model", "that", "encodes", "stories", "and", "computes", "surprise", "and", "uncertainty", "reduction", ".", "evaluating", "against", "short", "stories", "annotated", "with", "human", "suspense", "judgements", ",", "we", "find", "that", "uncertainty", "reduction", "over", "representations", "is", "the", "best", "predictor", ",", "resulting", "in", "near", "human", "accuracy", ".", "we", "also", "show", "that", "uncertainty", "reduction", "can", "be", "used", "to", "predict", "suspenseful", "events", "in", "movie", "synopses", "."]}, {"venue": "ACL", "title": "Hierarchical Context-aware Network for Dense Video Event Captioning", "abstract": "Dense video event captioning aims to generate a sequence of descriptive captions for each event in a long untrimmed video. Video-level context provides important information and facilities the model to generate consistent and less redundant captions between events. In this paper, we introduce a novel Hierarchical Context-aware Network for dense video event captioning (HCN) to capture context from various aspects. 
In detail, the model leverages local and global context with different mechanisms to jointly learn to generate coherent captions. The local context module performs full interaction between neighbor frames and the global context module selectively attends to previous or future events. According to our extensive experiment on both Youcook2 and Activitynet Captioning datasets, the video-level HCN model outperforms the event-level context-agnostic model by a large margin. The code is available at https://github.com/KirkGuo/HCN.", "doc_id": "672b264340445985697afe4a34a2c39c", "publication_year": 2021, "sentences": ["dense video event captioning aims to generate a sequence of descriptive captions for each event in a long untrimmed video .", "video - level context provides important information and facilities the model to generate consistent and less redundant captions between events .", "in this paper , we introduce a novel hierarchical context - aware network for dense video event captioning ( hcn ) to capture context from various aspects .", "in detail , the model leverages local and global context with different mechanisms to jointly learn to generate coherent captions .", "the local context module performs full interaction between neighbor frames and the global context module selectively attends to previous or future events .", "according to our extensive experiment on both youcook2 and activitynet captioning datasets , the video - level hcn model outperforms the event - level context - agnostic model by a large margin .", "the code is available at https : / / github . 
com / kirkguo / hcn ."], "events": [{"event_type": "ITT", "arguments": [{"text": "dense video event captioning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dense", "video", "event", "captioning"], "offsets": [0, 1, 2, 3]}, {"text": "in a long untrimmed video", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "long", "untrimmed", "video"], "offsets": [15, 16, 17, 18, 19]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [6]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [46]}, {"text": "hierarchical context - aware network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hierarchical", "context", "-", "aware", "network"], "offsets": [50, 51, 52, 53, 54]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [64]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [47]}}, {"event_type": "WKS", "arguments": [{"text": "with different mechanisms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "different", "mechanisms"], "offsets": [80, 81, 82]}, {"text": "jointly learn", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["jointly", "learn"], "offsets": [84, 85]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [87]}, {"text": "local context", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["local", "context"], "offsets": [76, 79]}, {"text": "global context", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["global", "context"], "offsets": [78, 79]}], "trigger": {"text": "leverages", "tokens": ["leverages"], "offsets": [75]}}, {"event_type": "PUR", "arguments": [{"text": "coherent captions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["coherent", "captions"], "offsets": [88, 89]}], "trigger": {"text": 
"generate", "tokens": ["generate"], "offsets": [87]}}, {"event_type": "PUR", "arguments": [{"text": "context", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["context"], "offsets": [65]}, {"text": "from various aspects", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "various", "aspects"], "offsets": [66, 67, 68]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "full interaction", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["full", "interaction"], "offsets": [96, 97]}, {"text": "between neighbor frames", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "neighbor", "frames"], "offsets": [98, 99, 100]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [95]}}, {"event_type": "WKS", "arguments": [{"text": "previous events", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["previous", "events"], "offsets": [109, 112]}, {"text": "future events", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["future", "events"], "offsets": [111, 112]}], "trigger": {"text": "selectively attends", "tokens": ["selectively", "attends"], "offsets": [106, 107]}}, {"event_type": "CMP", "arguments": [{"text": "youcook2", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["youcook2"], "offsets": [121]}, {"text": "activitynet captioning datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["activitynet", "captioning", "datasets"], "offsets": [123, 124, 125]}, {"text": "video - level hcn model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["video", "-", "level", "hierarchical", "context", "-", "aware", "network", "model"], "offsets": [128, 129, 130, 50, 51, 52, 53, 54, 132]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [133]}, {"text": "event - level context - agnostic model", "nugget_type": "APP", 
"argument_type": "Arg2", "tokens": ["event", "-", "level", "context", "-", "agnostic", "model"], "offsets": [135, 136, 137, 138, 139, 140, 141]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [133]}}, {"event_type": "RWS", "arguments": [{"text": "between events", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "events"], "offsets": [39, 40]}, {"text": "video - level context", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["video", "-", "level", "context"], "offsets": [21, 22, 23, 24]}, {"text": "consistent captions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["consistent", "captions"], "offsets": [34, 38]}, {"text": "less redundant captions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["less", "redundant", "captions"], "offsets": [36, 37, 38]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [33]}}, {"event_type": "RWS", "arguments": [{"text": "video - level context", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["video", "-", "level", "context"], "offsets": [21, 22, 23, 24]}, {"text": "between events", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "events"], "offsets": [39, 40]}, {"text": "consistent captions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["consistent", "captions"], "offsets": [34, 38]}, {"text": "less redundant captions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["less", "redundant", "captions"], "offsets": [36, 37, 38]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [33]}}], "document": ["dense", "video", "event", "captioning", "aims", "to", "generate", "a", "sequence", "of", "descriptive", "captions", "for", "each", "event", "in", "a", "long", "untrimmed", "video", ".", "video", "-", "level", "context", "provides", "important", "information", "and", "facilities", "the", "model", "to", "generate", 
"consistent", "and", "less", "redundant", "captions", "between", "events", ".", "in", "this", "paper", ",", "we", "introduce", "a", "novel", "hierarchical", "context", "-", "aware", "network", "for", "dense", "video", "event", "captioning", "(", "hcn", ")", "to", "capture", "context", "from", "various", "aspects", ".", "in", "detail", ",", "the", "model", "leverages", "local", "and", "global", "context", "with", "different", "mechanisms", "to", "jointly", "learn", "to", "generate", "coherent", "captions", ".", "the", "local", "context", "module", "performs", "full", "interaction", "between", "neighbor", "frames", "and", "the", "global", "context", "module", "selectively", "attends", "to", "previous", "or", "future", "events", ".", "according", "to", "our", "extensive", "experiment", "on", "both", "youcook2", "and", "activitynet", "captioning", "datasets", ",", "the", "video", "-", "level", "hcn", "model", "outperforms", "the", "event", "-", "level", "context", "-", "agnostic", "model", "by", "a", "large", "margin", ".", "the", "code", "is", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "kirkguo", "/", "hcn", "."]}, {"venue": "ACL", "title": "Discrete Cosine Transform as Universal Sentence Encoder", "abstract": "Modern sentence encoders are used to generate dense vector representations that capture the underlying linguistic characteristics for a sequence of words, including phrases, sentences, or paragraphs. These kinds of representations are ideal for training a classifier for an end task such as sentiment analysis, question answering and text classification. Different models have been proposed to efficiently generate general purpose sentence representations to be used in pretraining protocols. 
While averaging is the most commonly used efficient sentence encoder, Discrete Cosine Transform (DCT) was recently proposed as an alternative that captures the underlying syntactic characteristics of a given text without compromising practical efficiency compared to averaging. However, as with most other sentence encoders, the DCT sentence encoder was only evaluated in English. To this end, we utilize DCT encoder to generate universal sentence representation for different languages such as German, French, Spanish and Russian. The experimental results clearly show the superior effectiveness of DCT encoding in which consistent performance improvements are achieved over strong baselines on multiple standardized datasets", "doc_id": "75a63d5e750343e9f620146e979f046f", "publication_year": 2021, "sentences": ["modern sentence encoders are used to generate dense vector representations that capture the underlying linguistic characteristics for a sequence of words , including phrases , sentences , or paragraphs .", "these kinds of representations are ideal for training a classifier for an end task such as sentiment analysis , question answering and text classification .", "different models have been proposed to efficiently generate general purpose sentence representations to be used in pretraining protocols .", "while averaging is the most commonly used efficient sentence encoder , discrete cosine transform ( dct ) was recently proposed as an alternative that captures the underlying syntactic characteristics of a given text without compromising practical efficiency compared to averaging .", "however , as with most other sentence encoders , the dct sentence encoder was only evaluated in english .", "to this end , we utilize dct encoder to generate universal sentence representation for different languages such as german , french , spanish and russian .", "the experimental results clearly show the superior effectiveness of dct encoding in which consistent performance 
improvements are achieved over strong baselines on multiple standardized datasets"], "events": [{"event_type": "ITT", "arguments": [{"text": "sentence encoders", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["sentence", "encoders"], "offsets": [1, 2]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [6]}}, {"event_type": "RWS", "arguments": [{"text": "different models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["different", "models"], "offsets": [55, 56]}, {"text": "general purpose sentence representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["general", "purpose", "sentence", "representations"], "offsets": [63, 64, 65, 66]}, {"text": "pretraining protocols", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["pretraining", "protocols"], "offsets": [71, 72]}], "trigger": {"text": "efficiently generate", "tokens": ["efficiently", "generate"], "offsets": [61, 62]}}, {"event_type": "RWS", "arguments": [{"text": "discrete cosine transform", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["discrete", "cosine", "transform"], "offsets": [85, 86, 87]}, {"text": "underlying syntactic characteristics of a given text", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["underlying", "syntactic", "characteristics", "of", "a", "given", "text"], "offsets": [100, 101, 102, 103, 104, 105, 106]}, {"text": "without compromising practical efficiency compared to averaging", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "compromising", "practical", "efficiency", "compared", "to", "averaging"], "offsets": [107, 108, 109, 110, 111, 112, 113]}], "trigger": {"text": "captures", "tokens": ["captures"], "offsets": [98]}}, {"event_type": "RWF", "arguments": [{"text": "dct sentence encoder", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["discrete", "cosine", "transform", "sentence", "encoder"], "offsets": [85, 86, 87, 126, 127]}, 
{"text": "only", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["only"], "offsets": [129]}], "trigger": {"text": "evaluated in english", "tokens": ["evaluated", "in", "english"], "offsets": [130, 131, 132]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [138]}, {"text": "dct encoder", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["discrete", "cosine", "transform", "encoder"], "offsets": [85, 86, 87, 141]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [143]}], "trigger": {"text": "utilize", "tokens": ["utilize"], "offsets": [139]}}, {"event_type": "PUR", "arguments": [{"text": "universal sentence representation for different languages", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["universal", "sentence", "representation", "for", "different", "languages"], "offsets": [144, 145, 146, 147, 148, 149]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [143]}}, {"event_type": "CMP", "arguments": [{"text": "multiple standardized datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multiple", "standardized", "datasets"], "offsets": [182, 183, 184]}, {"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [179, 180]}, {"text": "effectiveness", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["effectiveness"], "offsets": [167]}, {"text": "superior", "nugget_type": "STR", "argument_type": "Result", "tokens": ["superior"], "offsets": [166]}, {"text": "dct encoding", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["dct", "encoding"], "offsets": [169, 170]}, {"text": "consistent performance improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["consistent", "performance", "improvements"], "offsets": [173, 174, 175]}], "trigger": {"text": 
"achieved", "tokens": ["achieved"], "offsets": [177]}}], "document": ["modern", "sentence", "encoders", "are", "used", "to", "generate", "dense", "vector", "representations", "that", "capture", "the", "underlying", "linguistic", "characteristics", "for", "a", "sequence", "of", "words", ",", "including", "phrases", ",", "sentences", ",", "or", "paragraphs", ".", "these", "kinds", "of", "representations", "are", "ideal", "for", "training", "a", "classifier", "for", "an", "end", "task", "such", "as", "sentiment", "analysis", ",", "question", "answering", "and", "text", "classification", ".", "different", "models", "have", "been", "proposed", "to", "efficiently", "generate", "general", "purpose", "sentence", "representations", "to", "be", "used", "in", "pretraining", "protocols", ".", "while", "averaging", "is", "the", "most", "commonly", "used", "efficient", "sentence", "encoder", ",", "discrete", "cosine", "transform", "(", "dct", ")", "was", "recently", "proposed", "as", "an", "alternative", "that", "captures", "the", "underlying", "syntactic", "characteristics", "of", "a", "given", "text", "without", "compromising", "practical", "efficiency", "compared", "to", "averaging", ".", "however", ",", "as", "with", "most", "other", "sentence", "encoders", ",", "the", "dct", "sentence", "encoder", "was", "only", "evaluated", "in", "english", ".", "to", "this", "end", ",", "we", "utilize", "dct", "encoder", "to", "generate", "universal", "sentence", "representation", "for", "different", "languages", "such", "as", "german", ",", "french", ",", "spanish", "and", "russian", ".", "the", "experimental", "results", "clearly", "show", "the", "superior", "effectiveness", "of", "dct", "encoding", "in", "which", "consistent", "performance", "improvements", "are", "achieved", "over", "strong", "baselines", "on", "multiple", "standardized", "datasets"]}, {"venue": "ACL", "title": "Multi-Sentence Argument Linking", "abstract": "We present a novel document-level model for finding argument 
spans that fill an event\u2019s roles, connecting related ideas in sentence-level semantic role labeling and coreference resolution. Because existing datasets for cross-sentence linking are small, development of our neural model is supported through the creation of a new resource, Roles Across Multiple Sentences (RAMS), which contains 9,124 annotated events across 139 types. We demonstrate strong performance of our model on RAMS and other event-related datasets.", "doc_id": "8a5d28a61a04bed5a55103d1c6a8c6ba", "publication_year": 2020, "sentences": ["we present a novel document - level model for finding argument spans that fill an event \u2019 s roles , connecting related ideas in sentence - level semantic role labeling and coreference resolution .", "because existing datasets for cross - sentence linking are small , development of our neural model is supported through the creation of a new resource , roles across multiple sentences ( rams ) , which contains 9 , 124 annotated events across 139 types .", "we demonstrate strong performance of our model on rams and other event - related datasets ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "document - level model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["document", "-", "level", "model"], "offsets": [4, 5, 6, 7]}, {"text": "finding", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["finding"], "offsets": [9]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "argument spans that fill an event \u2019 s roles", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["argument", "spans", "that", "fill", "an", "event", "\u2019", "s", "roles"], "offsets": [10, 11, 12, 13, 14, 15, 16, 17, 18]}], "trigger": {"text": "finding", "tokens": ["finding"], "offsets": [9]}}, {"event_type": "MDS", "arguments": 
[{"text": "related ideas", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["related", "ideas"], "offsets": [21, 22]}, {"text": "in sentence - level semantic role labeling and coreference resolution", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "sentence", "-", "level", "semantic", "role", "labeling", "and", "coreference", "resolution"], "offsets": [23, 24, 25, 26, 27, 28, 29, 30, 31, 32]}], "trigger": {"text": "connecting", "tokens": ["connecting"], "offsets": [20]}}, {"event_type": "RWF", "arguments": [{"text": "existing datasets", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["existing", "datasets"], "offsets": [35, 36]}, {"text": "cross - sentence linking", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["cross", "-", "sentence", "linking"], "offsets": [38, 39, 40, 41]}], "trigger": {"text": "small", "tokens": ["small"], "offsets": [43]}}, {"event_type": "MDS", "arguments": [{"text": "neural model", "nugget_type": "APP", "argument_type": "Target", "tokens": ["neural", "model"], "offsets": [48, 49]}, {"text": "roles across multiple sentences", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["roles", "across", "multiple", "sentences"], "offsets": [60, 61, 62, 63]}], "trigger": {"text": "supported", "tokens": ["supported"], "offsets": [51]}}, {"event_type": "FAC", "arguments": [{"text": "strong performance of our model", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["strong", "performance", "of", "our", "model"], "offsets": [81, 82, 83, 84, 85]}, {"text": "roles across multiple sentences", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["roles", "across", "multiple", "sentences"], "offsets": [60, 61, 62, 63]}, {"text": "other event - related datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["other", "event", "-", "related", "datasets"], "offsets": [89, 90, 91, 92, 93]}], "trigger": {"text": "demonstrate", "tokens": 
["demonstrate"], "offsets": [80]}}], "document": ["we", "present", "a", "novel", "document", "-", "level", "model", "for", "finding", "argument", "spans", "that", "fill", "an", "event", "\u2019", "s", "roles", ",", "connecting", "related", "ideas", "in", "sentence", "-", "level", "semantic", "role", "labeling", "and", "coreference", "resolution", ".", "because", "existing", "datasets", "for", "cross", "-", "sentence", "linking", "are", "small", ",", "development", "of", "our", "neural", "model", "is", "supported", "through", "the", "creation", "of", "a", "new", "resource", ",", "roles", "across", "multiple", "sentences", "(", "rams", ")", ",", "which", "contains", "9", ",", "124", "annotated", "events", "across", "139", "types", ".", "we", "demonstrate", "strong", "performance", "of", "our", "model", "on", "rams", "and", "other", "event", "-", "related", "datasets", "."]}, {"venue": "ACL", "title": "ReACC: A Retrieval-Augmented Code Completion Framework", "abstract": "Code completion, which aims to predict the following code token(s) according to the code context, can improve the productivity of software development. Recent work has proved that statistical language modeling with transformers can greatly improve the performance in the code completion task via learning from large-scale source code datasets. However, current approaches focus only on code context within the file or project, i.e. internal context. Our distinction is utilizing \u201dexternal\u201d context, inspired by human behaviors of copying from the related code snippets when writing code. Specifically, we propose a retrieval-augmented code completion framework, leveraging both lexical copying and referring to code with similar semantics by retrieval. We adopt a stage-wise training approach that combines a source code retriever and an auto-regressive language model for programming language. 
We evaluate our approach in the code completion task in Python and Java programming languages, achieving a state-of-the-art performance on CodeXGLUE benchmark.", "doc_id": "730dc4c009e1028b0dd75e9d521f087a", "publication_year": 2022, "sentences": ["code completion , which aims to predict the following code token ( s ) according to the code context , can improve the productivity of software development .", "recent work has proved that statistical language modeling with transformers can greatly improve the performance in the code completion task via learning from large - scale source code datasets .", "however , current approaches focus only on code context within the file or project , i . e . internal context .", "our distinction is utilizing \u201d external \u201d context , inspired by human behaviors of copying from the related code snippets when writing code .", "specifically , we propose a retrieval - augmented code completion framework , leveraging both lexical copying and referring to code with similar semantics by retrieval .", "we adopt a stage - wise training approach that combines a source code retriever and an auto - regressive language model for programming language .", "we evaluate our approach in the code completion task in python and java programming languages , achieving a state - of - the - art performance on codexglue benchmark ."], "events": [{"event_type": "ITT", "arguments": [{"text": "code completion", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["code", "completion"], "offsets": [0, 1]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "current approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["current", "approaches"], "offsets": [60, 61]}, {"text": "code context", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["code", "context"], "offsets": [65, 66]}], "trigger": {"text": "focus", "tokens": ["focus"], "offsets": [62]}}, 
{"event_type": "WKS", "arguments": [{"text": "\u201d external \u201d context", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["\u201d", "external", "\u201d", "context"], "offsets": [84, 85, 86, 87]}], "trigger": {"text": "utilizing", "tokens": ["utilizing"], "offsets": [83]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [106]}, {"text": "retrieval - augmented code completion framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["retrieval", "-", "augmented", "code", "completion", "framework"], "offsets": [109, 110, 111, 112, 113, 114]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [107]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [130]}, {"text": "stage - wise training approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["stage", "-", "wise", "training", "approach"], "offsets": [133, 134, 135, 136, 137]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [131]}}, {"event_type": "MDS", "arguments": [{"text": "source code retriever", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["source", "code", "retriever"], "offsets": [141, 142, 143]}, {"text": "auto - regressive language model", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["auto", "-", "regressive", "language", "model"], "offsets": [146, 147, 148, 149, 150]}], "trigger": {"text": "combines", "tokens": ["combines"], "offsets": [139]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [155]}, {"text": "retrieval - augmented code completion framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["retrieval", "-", "augmented", "code", "completion", "framework"], "offsets": [109, 110, 111, 112, 113, 114]}, {"text": "in 
the code completion task in python and java programming languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "code", "completion", "task", "in", "python", "and", "java", "programming", "languages"], "offsets": [159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [156]}}, {"event_type": "FAC", "arguments": [{"text": "retrieval - augmented code completion framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["retrieval", "-", "augmented", "code", "completion", "framework"], "offsets": [109, 110, 111, 112, 113, 114]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [173, 174, 175, 176, 177, 178, 179, 180]}, {"text": "on codexglue benchmark", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "codexglue", "benchmark"], "offsets": [181, 182, 183]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [171]}}, {"event_type": "RWS", "arguments": [{"text": "large - scale source code datasets", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["large", "-", "scale", "source", "code", "datasets"], "offsets": [51, 52, 53, 54, 55, 56]}, {"text": "statistical language modeling with transformers", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["statistical", "language", "modeling", "with", "transformers"], "offsets": [33, 34, 35, 36, 37]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [40]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [49]}}, {"event_type": "PUR", "arguments": [{"text": "performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance"], "offsets": [42]}, {"text": "in the code completion task", "nugget_type": "LIM", "argument_type": 
"Condition", "tokens": ["in", "the", "code", "completion", "task"], "offsets": [43, 44, 45, 46, 47]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [40]}}, {"event_type": "MDS", "arguments": [{"text": "lexical copying", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["lexical", "copying"], "offsets": [118, 119]}, {"text": "referring", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["referring"], "offsets": [121]}, {"text": "similar semantics", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["similar", "semantics"], "offsets": [125, 126]}], "trigger": {"text": "code", "tokens": ["code"], "offsets": [123]}}], "document": ["code", "completion", ",", "which", "aims", "to", "predict", "the", "following", "code", "token", "(", "s", ")", "according", "to", "the", "code", "context", ",", "can", "improve", "the", "productivity", "of", "software", "development", ".", "recent", "work", "has", "proved", "that", "statistical", "language", "modeling", "with", "transformers", "can", "greatly", "improve", "the", "performance", "in", "the", "code", "completion", "task", "via", "learning", "from", "large", "-", "scale", "source", "code", "datasets", ".", "however", ",", "current", "approaches", "focus", "only", "on", "code", "context", "within", "the", "file", "or", "project", ",", "i", ".", "e", ".", "internal", "context", ".", "our", "distinction", "is", "utilizing", "\u201d", "external", "\u201d", "context", ",", "inspired", "by", "human", "behaviors", "of", "copying", "from", "the", "related", "code", "snippets", "when", "writing", "code", ".", "specifically", ",", "we", "propose", "a", "retrieval", "-", "augmented", "code", "completion", "framework", ",", "leveraging", "both", "lexical", "copying", "and", "referring", "to", "code", "with", "similar", "semantics", "by", "retrieval", ".", "we", "adopt", "a", "stage", "-", "wise", "training", "approach", "that", "combines", "a", "source", "code", 
"retriever", "and", "an", "auto", "-", "regressive", "language", "model", "for", "programming", "language", ".", "we", "evaluate", "our", "approach", "in", "the", "code", "completion", "task", "in", "python", "and", "java", "programming", "languages", ",", "achieving", "a", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "codexglue", "benchmark", "."]}, {"venue": "ACL", "title": "Shared-Private Bilingual Word Embeddings for Neural Machine Translation", "abstract": "Word embedding is central to neural machine translation (NMT), which has attracted intensive research interest in recent years. In NMT, the source embedding plays the role of the entrance while the target embedding acts as the terminal. These layers occupy most of the model parameters for representation learning. Furthermore, they indirectly interface via a soft-attention mechanism, which makes them comparatively isolated. In this paper, we propose shared-private bilingual word embeddings, which give a closer relationship between the source and target embeddings, and which also reduce the number of model parameters. For similar source and target words, their embeddings tend to share a part of the features and they cooperatively learn these common representation units. 
Experiments on 5 language pairs belonging to 6 different language families and written in 5 different alphabets demonstrate that the proposed model provides a significant performance boost over the strong baselines with dramatically fewer model parameters.", "doc_id": "80d3f0f76bac6795272f718219290a5d", "publication_year": 2019, "sentences": ["word embedding is central to neural machine translation ( nmt ) , which has attracted intensive research interest in recent years .", "in nmt , the source embedding plays the role of the entrance while the target embedding acts as the terminal .", "these layers occupy most of the model parameters for representation learning .", "furthermore , they indirectly interface via a soft - attention mechanism , which makes them comparatively isolated .", "in this paper , we propose shared - private bilingual word embeddings , which give a closer relationship between the source and target embeddings , and which also reduce the number of model parameters .", "for similar source and target words , their embeddings tend to share a part of the features and they cooperatively learn these common representation units .", "experiments on 5 language pairs belonging to 6 different language families and written in 5 different alphabets demonstrate that the proposed model provides a significant performance boost over the strong baselines with dramatically fewer model parameters ."], "events": [{"event_type": "ITT", "arguments": [{"text": "word embedding", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["word", "embedding"], "offsets": [0, 1]}], "trigger": {"text": "central", "tokens": ["central"], "offsets": [3]}}, {"event_type": "RWF", "arguments": [{"text": "indirectly interface", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["indirectly", "interface"], "offsets": [58, 59]}, {"text": "comparatively isolated", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["comparatively", "isolated"], "offsets": [70, 71]}], 
"trigger": {"text": "indirectly interface", "tokens": ["indirectly", "interface"], "offsets": [58, 59]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [77]}, {"text": "shared - private bilingual word embeddings", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["shared", "-", "private", "bilingual", "word", "embeddings"], "offsets": [79, 80, 81, 82, 83, 84]}, {"text": "give", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["give"], "offsets": [87]}, {"text": "reduce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reduce"], "offsets": [101]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [78]}}, {"event_type": "PUR", "arguments": [{"text": "closer relationship between the source and target embeddings", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["closer", "relationship", "between", "the", "source", "and", "target", "embeddings"], "offsets": [89, 90, 91, 92, 93, 94, 95, 96]}], "trigger": {"text": "give", "tokens": ["give"], "offsets": [87]}}, {"event_type": "PUR", "arguments": [{"text": "number of model parameters", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["number", "of", "model", "parameters"], "offsets": [103, 104, 105, 106]}], "trigger": {"text": "reduce", "tokens": ["reduce"], "offsets": [101]}}, {"event_type": "CMP", "arguments": [{"text": "shared - private bilingual word embeddings", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["shared", "-", "private", "bilingual", "word", "embeddings"], "offsets": [79, 80, 81, 82, 83, 84]}, {"text": "significant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant"], "offsets": [158]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [159]}, {"text": "boost", "nugget_type": "STR", "argument_type": "Result", "tokens": ["boost"], "offsets": [160]}, {"text": 
"strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [163, 164]}, {"text": "with dramatically fewer model parameters", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "dramatically", "fewer", "model", "parameters"], "offsets": [165, 166, 167, 168, 169]}, {"text": "on 5 language pairs belonging to 6 different language families", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "5", "language", "pairs", "belonging", "to", "6", "different", "language", "families"], "offsets": [135, 136, 137, 138, 139, 140, 141, 142, 143, 144]}, {"text": "written in 5 different alphabets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["written", "in", "5", "different", "alphabets"], "offsets": [146, 147, 148, 149, 150]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [156]}}], "document": ["word", "embedding", "is", "central", "to", "neural", "machine", "translation", "(", "nmt", ")", ",", "which", "has", "attracted", "intensive", "research", "interest", "in", "recent", "years", ".", "in", "nmt", ",", "the", "source", "embedding", "plays", "the", "role", "of", "the", "entrance", "while", "the", "target", "embedding", "acts", "as", "the", "terminal", ".", "these", "layers", "occupy", "most", "of", "the", "model", "parameters", "for", "representation", "learning", ".", "furthermore", ",", "they", "indirectly", "interface", "via", "a", "soft", "-", "attention", "mechanism", ",", "which", "makes", "them", "comparatively", "isolated", ".", "in", "this", "paper", ",", "we", "propose", "shared", "-", "private", "bilingual", "word", "embeddings", ",", "which", "give", "a", "closer", "relationship", "between", "the", "source", "and", "target", "embeddings", ",", "and", "which", "also", "reduce", "the", "number", "of", "model", "parameters", ".", "for", "similar", "source", "and", "target", "words", ",", "their", "embeddings", "tend", "to", "share", "a", 
"part", "of", "the", "features", "and", "they", "cooperatively", "learn", "these", "common", "representation", "units", ".", "experiments", "on", "5", "language", "pairs", "belonging", "to", "6", "different", "language", "families", "and", "written", "in", "5", "different", "alphabets", "demonstrate", "that", "the", "proposed", "model", "provides", "a", "significant", "performance", "boost", "over", "the", "strong", "baselines", "with", "dramatically", "fewer", "model", "parameters", "."]}, {"venue": "ACL", "title": "Multimodal Neural Graph Memory Networks for Visual Question Answering", "abstract": "We introduce a new neural network architecture, Multimodal Neural Graph Memory Networks (MN-GMN), for visual question answering. The MN-GMN uses graph structure with different region features as node attributes and applies a recently proposed powerful graph neural network model, Graph Network (GN), to reason about objects and their interactions in an image. The input module of the MN-GMN generates a set of visual features plus a set of encoded region-grounded captions (RGCs) for the image. The RGCs capture object attributes and their relationships. Two GNs are constructed from the input module using the visual features and encoded RGCs. Each node of the GNs iteratively computes a question-guided contextualized representation of the visual/textual information assigned to it. Then, to combine the information from both GNs, the nodes write the updated representations to an external spatial memory. The final states of the memory cells are fed into an answer module to predict an answer. 
Experiments show MN-GMN rivals the state-of-the-art models on Visual7W, VQA-v2.0, and CLEVR datasets.", "doc_id": "1850281198135689f9949a4736e3cbd3", "publication_year": 2020, "sentences": ["we introduce a new neural network architecture , multimodal neural graph memory networks ( mn - gmn ) , for visual question answering .", "the mn - gmn uses graph structure with different region features as node attributes and applies a recently proposed powerful graph neural network model , graph network ( gn ) , to reason about objects and their interactions in an image .", "the input module of the mn - gmn generates a set of visual features plus a set of encoded region - grounded captions ( rgcs ) for the image .", "the rgcs capture object attributes and their relationships .", "two gns are constructed from the input module using the visual features and encoded rgcs .", "each node of the gns iteratively computes a question - guided contextualized representation of the visual / textual information assigned to it .", "then , to combine the information from both gns , the nodes write the updated representations to an external spatial memory .", "the final states of the memory cells are fed into an answer module to predict an answer .", "experiments show mn - gmn rivals the state - of - the - art models on visual7w , vqa - v2 . 
0 , and clevr datasets ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "neural network architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "network", "architecture"], "offsets": [4, 5, 6]}, {"text": "visual question answering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["visual", "question", "answering"], "offsets": [20, 21, 22]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [1]}}, {"event_type": "MDS", "arguments": [{"text": "graph structure with different region features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["graph", "structure", "with", "different", "region", "features"], "offsets": [29, 30, 31, 32, 33, 34]}, {"text": "recently proposed powerful graph neural network model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["recently", "proposed", "powerful", "graph", "neural", "network", "model"], "offsets": [41, 42, 43, 44, 45, 46, 47]}, {"text": "reason", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reason"], "offsets": [56]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [28]}}, {"event_type": "PUR", "arguments": [{"text": "objects", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["objects"], "offsets": [58]}, {"text": "interactions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["interactions"], "offsets": [61]}, {"text": "in an image", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "image"], "offsets": [62, 63, 64]}], "trigger": {"text": "reason", "tokens": ["reason"], "offsets": [56]}}, {"event_type": "MDS", "arguments": [{"text": "input module", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["input", "module"], "offsets": [67, 68]}, {"text": "set of visual features", "nugget_type": "FEA", "argument_type": "TriedComponent", 
"tokens": ["set", "of", "visual", "features"], "offsets": [76, 77, 78, 79]}, {"text": "set of encoded region - grounded captions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["set", "of", "encoded", "region", "-", "grounded", "captions"], "offsets": [82, 83, 84, 85, 86, 87, 88]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [74]}}, {"event_type": "MDS", "arguments": [{"text": "region - grounded captions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["region", "-", "grounded", "captions"], "offsets": [85, 86, 87, 88]}, {"text": "object attributes", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["object", "attributes"], "offsets": [99, 100]}, {"text": "relationships", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["relationships"], "offsets": [103]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [98]}}, {"event_type": "MDS", "arguments": [{"text": "visual features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["visual", "features"], "offsets": [115, 116]}, {"text": "encoded rgcs", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["encoded", "region", "-", "grounded", "captions"], "offsets": [118, 85, 86, 87, 88]}, {"text": "from the input module", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "input", "module"], "offsets": [109, 110, 111, 112]}, {"text": "two gns", "nugget_type": "APP", "argument_type": "Target", "tokens": ["two", "graph", "network"], "offsets": [105, 49, 50]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [113]}}, {"event_type": "MDS", "arguments": [{"text": "each node of the gns", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["each", "node", "of", "the", "two", "graph", "network"], "offsets": [121, 122, 123, 124, 105, 49, 50]}, {"text": "question - guided contextualized representation", "nugget_type": "FEA", 
"argument_type": "TriedComponent", "tokens": ["question", "-", "guided", "contextualized", "representation"], "offsets": [129, 130, 131, 132, 133]}], "trigger": {"text": "iteratively computes", "tokens": ["iteratively", "computes"], "offsets": [126, 127]}}, {"event_type": "MDS", "arguments": [{"text": "combine", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["combine"], "offsets": [147]}, {"text": "updated representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["updated", "representations"], "offsets": [158, 159]}, {"text": "external spatial memory", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["external", "spatial", "memory"], "offsets": [162, 163, 164]}], "trigger": {"text": "write", "tokens": ["write"], "offsets": [156]}}, {"event_type": "PUR", "arguments": [{"text": "information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["information"], "offsets": [149]}, {"text": "from both gns", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "two", "graph", "network"], "offsets": [150, 105, 49, 50]}], "trigger": {"text": "combine", "tokens": ["combine"], "offsets": [147]}}, {"event_type": "MDS", "arguments": [{"text": "final states of the memory cells", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["final", "states", "of", "the", "memory", "cells"], "offsets": [167, 168, 169, 170, 171, 172]}, {"text": "answer module", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["answer", "module"], "offsets": [177, 178]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [180]}], "trigger": {"text": "fed into", "tokens": ["fed", "into"], "offsets": [174, 175]}}, {"event_type": "PUR", "arguments": [{"text": "answer", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["answer"], "offsets": [182]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [180]}}, {"event_type": 
"CMP", "arguments": [{"text": "multimodal neural graph memory networks", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["multimodal", "neural", "graph", "memory", "networks"], "offsets": [8, 9, 10, 11, 12]}, {"text": "rivals", "nugget_type": "STR", "argument_type": "Result", "tokens": ["rivals"], "offsets": [189]}, {"text": "state - of - the - art models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [191, 192, 193, 194, 195, 196, 197, 198]}, {"text": "visual7w , vqa - v2 . 0 , datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["visual7w", ",", "vqa", "-", "v2", ".", "0", ",", "datasets"], "offsets": [200, 201, 202, 203, 204, 205, 206, 207, 210]}], "trigger": {"text": "rivals", "tokens": ["rivals"], "offsets": [189]}}], "document": ["we", "introduce", "a", "new", "neural", "network", "architecture", ",", "multimodal", "neural", "graph", "memory", "networks", "(", "mn", "-", "gmn", ")", ",", "for", "visual", "question", "answering", ".", "the", "mn", "-", "gmn", "uses", "graph", "structure", "with", "different", "region", "features", "as", "node", "attributes", "and", "applies", "a", "recently", "proposed", "powerful", "graph", "neural", "network", "model", ",", "graph", "network", "(", "gn", ")", ",", "to", "reason", "about", "objects", "and", "their", "interactions", "in", "an", "image", ".", "the", "input", "module", "of", "the", "mn", "-", "gmn", "generates", "a", "set", "of", "visual", "features", "plus", "a", "set", "of", "encoded", "region", "-", "grounded", "captions", "(", "rgcs", ")", "for", "the", "image", ".", "the", "rgcs", "capture", "object", "attributes", "and", "their", "relationships", ".", "two", "gns", "are", "constructed", "from", "the", "input", "module", "using", "the", "visual", "features", "and", "encoded", "rgcs", ".", "each", "node", "of", "the", "gns", "iteratively", "computes", "a", "question", "-", "guided", "contextualized", 
"representation", "of", "the", "visual", "/", "textual", "information", "assigned", "to", "it", ".", "then", ",", "to", "combine", "the", "information", "from", "both", "gns", ",", "the", "nodes", "write", "the", "updated", "representations", "to", "an", "external", "spatial", "memory", ".", "the", "final", "states", "of", "the", "memory", "cells", "are", "fed", "into", "an", "answer", "module", "to", "predict", "an", "answer", ".", "experiments", "show", "mn", "-", "gmn", "rivals", "the", "state", "-", "of", "-", "the", "-", "art", "models", "on", "visual7w", ",", "vqa", "-", "v2", ".", "0", ",", "and", "clevr", "datasets", "."]}, {"venue": "ACL", "title": "How Does NLP Benefit Legal System: A Summary of Legal Artificial Intelligence", "abstract": "Legal Artificial Intelligence (LegalAI) focuses on applying the technology of artificial intelligence, especially natural language processing, to benefit tasks in the legal domain. In recent years, LegalAI has drawn increasing attention rapidly from both AI researchers and legal professionals, as LegalAI is beneficial to the legal system for liberating legal professionals from a maze of paperwork. Legal professionals often think about how to solve tasks from rule-based and symbol-based methods, while NLP researchers concentrate more on data-driven and embedding methods. In this paper, we introduce the history, the current state, and the future directions of research in LegalAI. We illustrate the tasks from the perspectives of legal professionals and NLP researchers and show several representative applications in LegalAI. We conduct experiments and provide an in-depth analysis of the advantages and disadvantages of existing works to explore possible future directions. 
You can find the implementation of our work from https://github.com/thunlp/CLAIM.", "doc_id": "0d52c2c9b186f49b22b9ddc3f1522fe6", "publication_year": 2020, "sentences": ["legal artificial intelligence ( legalai ) focuses on applying the technology of artificial intelligence , especially natural language processing , to benefit tasks in the legal domain .\\nin recent years , legalai has drawn increasing attention rapidly from both ai researchers and legal professionals , as legalai is beneficial to the legal system for liberating legal professionals from a maze of paperwork .\\nlegal professionals often think about how to solve tasks from rule - based and symbol - based methods , while nlp researchers concentrate more on data - driven and embedding methods .\\nin this paper , we introduce the history , the current state , and the future directions of research in legalai .\\nwe illustrate the tasks from the perspectives of legal professionals and nlp researchers and show several representative applications in legalai .\\nwe conduct experiments and provide an in - depth analysis of the advantages and disadvantages of existing works to explore possible future directions .\\nyou can find the implementation of our work from https : / / github . 
com / thunlp / claim ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [97]}, {"text": "the history", "nugget_type": "APP", "argument_type": "Content", "tokens": ["the", "history"], "offsets": [99, 100]}, {"text": "the current state", "nugget_type": "APP", "argument_type": "Content", "tokens": ["the", "current", "state"], "offsets": [102, 103, 104]}, {"text": "the future directions", "nugget_type": "APP", "argument_type": "Content", "tokens": ["the", "future", "directions"], "offsets": [107, 108, 109]}, {"text": "research in legalai", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["research", "in", "legalai"], "offsets": [111, 112, 113]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [98]}}, {"event_type": "RWS", "arguments": [{"text": "legal artificial intelligence ( legalai )", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["legal", "artificial", "intelligence", "(", "legalai", ")"], "offsets": [0, 1, 2, 3, 4, 5]}, {"text": "applying the technology of artificial intelligence", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["applying", "the", "technology", "of", "artificial", "intelligence"], "offsets": [8, 9, 10, 11, 12, 13]}, {"text": "especially natural language processing", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["especially", "natural", "language", "processing"], "offsets": [15, 16, 17, 18]}, {"text": "benefit tasks in the legal domain", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["benefit", "tasks", "in", "the", "legal", "domain"], "offsets": [21, 22, 23, 24, 25, 26]}], "trigger": {"text": "focuses on", "tokens": ["focuses", "on"], "offsets": [6, 7]}}, {"event_type": "MDS", "arguments": [{"text": "from the perspectives of legal professionals and nlp researchers", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "perspectives", 
"of", "legal", "professionals", "and", "nlp", "researchers"], "offsets": [118, 119, 120, 121, 122, 123, 124, 125, 126]}, {"text": "the tasks", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["the", "tasks"], "offsets": [116, 117]}], "trigger": {"text": "illustrate", "tokens": ["illustrate"], "offsets": [115]}}, {"event_type": "MDS", "arguments": [{"text": "in legalai", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "legalai"], "offsets": [132, 133]}, {"text": "several representative applications", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["several", "representative", "applications"], "offsets": [129, 130, 131]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [128]}}, {"event_type": "MDS", "arguments": [{"text": "experiments", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["experiments"], "offsets": [136]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [135]}}, {"event_type": "MDS", "arguments": [{"text": "an in - depth analysis of the advantages and disadvantages of existing works", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["an", "in", "-", "depth", "analysis", "of", "the", "advantages", "and", "disadvantages", "of", "existing", "works"], "offsets": [139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151]}, {"text": "explore possible future directions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["explore", "possible", "future", "directions"], "offsets": [153, 154, 155, 156]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [138]}}], "document": ["legal", "artificial", "intelligence", "(", "legalai", ")", "focuses", "on", "applying", "the", "technology", "of", "artificial", "intelligence", ",", "especially", "natural", "language", "processing", ",", "to", "benefit", "tasks", "in", "the", "legal", "domain", ".\\nin", "recent", "years", ",", "legalai", "has", "drawn", "increasing", 
"attention", "rapidly", "from", "both", "ai", "researchers", "and", "legal", "professionals", ",", "as", "legalai", "is", "beneficial", "to", "the", "legal", "system", "for", "liberating", "legal", "professionals", "from", "a", "maze", "of", "paperwork", ".\\nlegal", "professionals", "often", "think", "about", "how", "to", "solve", "tasks", "from", "rule", "-", "based", "and", "symbol", "-", "based", "methods", ",", "while", "nlp", "researchers", "concentrate", "more", "on", "data", "-", "driven", "and", "embedding", "methods", ".\\nin", "this", "paper", ",", "we", "introduce", "the", "history", ",", "the", "current", "state", ",", "and", "the", "future", "directions", "of", "research", "in", "legalai", ".\\nwe", "illustrate", "the", "tasks", "from", "the", "perspectives", "of", "legal", "professionals", "and", "nlp", "researchers", "and", "show", "several", "representative", "applications", "in", "legalai", ".\\nwe", "conduct", "experiments", "and", "provide", "an", "in", "-", "depth", "analysis", "of", "the", "advantages", "and", "disadvantages", "of", "existing", "works", "to", "explore", "possible", "future", "directions", ".\\nyou", "can", "find", "the", "implementation", "of", "our", "work", "from", "https", ":", "/", "/", "github", ".", "com", "/", "thunlp", "/", "claim", "."]}, {"venue": "ACL", "title": "Bringing Structure into Summaries: a Faceted Summarization Dataset for Long Scientific Documents", "abstract": "Faceted summarization provides briefings of a document from different perspectives. Readers can quickly comprehend the main points of a long document with the help of a structured outline. However, little research has been conducted on this subject, partially due to the lack of large-scale faceted summarization datasets. In this study, we present FacetSum, a faceted summarization benchmark built on Emerald journal articles, covering a diverse range of domains. 
Different from traditional document-summary pairs, FacetSum provides multiple summaries, each targeted at specific sections of a long document, including the purpose, method, findings, and value. Analyses and empirical results on our dataset reveal the importance of bringing structure into summaries. We believe FacetSum will spur further advances in summarization research and foster the development of NLP systems that can leverage the structured information in both long texts and summaries.", "doc_id": "d7f5243127521abdc77b8e2d1f3270a3", "publication_year": 2021, "sentences": ["faceted summarization provides briefings of a document from different perspectives .", "readers can quickly comprehend the main points of a long document with the help of a structured outline .", "however , little research has been conducted on this subject , partially due to the lack of large - scale faceted summarization datasets .", "in this study , we present facetsum , a faceted summarization benchmark built on emerald journal articles , covering a diverse range of domains .", "different from traditional document - summary pairs , facetsum provides multiple summaries , each targeted at specific sections of a long document , including the purpose , method , findings , and value .", "analyses and empirical results on our dataset reveal the importance of bringing structure into summaries .", "we believe facetsum will spur further advances in summarization research and foster the development of nlp systems that can leverage the structured information in both long texts and summaries ."], "events": [{"event_type": "FAC", "arguments": [{"text": "importance of bringing structure into summaries", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["importance", "of", "bringing", "structure", "into", "summaries"], "offsets": [122, 123, 124, 125, 126, 127]}, {"text": "faceted summarization benchmark", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["faceted", 
"summarization", "benchmark"], "offsets": [63, 64, 65]}], "trigger": {"text": "reveal", "tokens": ["reveal"], "offsets": [120]}}, {"event_type": "RWF", "arguments": [{"text": "large - scale faceted summarization datasets", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["large", "-", "scale", "faceted", "summarization", "datasets"], "offsets": [47, 48, 49, 50, 51, 52]}], "trigger": {"text": "lack", "tokens": ["lack"], "offsets": [45]}}, {"event_type": "RWF", "arguments": [{"text": "faceted summarization", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["faceted", "summarization"], "offsets": [0, 1]}, {"text": "little research", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["little", "research"], "offsets": [32, 33]}], "trigger": {"text": "conducted", "tokens": ["conducted"], "offsets": [36]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [58]}, {"text": "faceted summarization benchmark", "nugget_type": "DST", "argument_type": "Content", "tokens": ["faceted", "summarization", "benchmark"], "offsets": [63, 64, 65]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [59]}}, {"event_type": "ITT", "arguments": [{"text": "faceted summarization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["faceted", "summarization"], "offsets": [0, 1]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [2]}}, {"event_type": "FAC", "arguments": [{"text": "facetsum", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["facetsum"], "offsets": [131]}, {"text": "structured information", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["structured", "information"], "offsets": [150, 151]}, {"text": "in both long texts and summaries", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "both", "long", "texts", "and", "summaries"], "offsets": [152, 153, 154, 155, 156, 157]}], "trigger": 
{"text": "leverage", "tokens": ["leverage"], "offsets": [148]}}], "document": ["faceted", "summarization", "provides", "briefings", "of", "a", "document", "from", "different", "perspectives", ".", "readers", "can", "quickly", "comprehend", "the", "main", "points", "of", "a", "long", "document", "with", "the", "help", "of", "a", "structured", "outline", ".", "however", ",", "little", "research", "has", "been", "conducted", "on", "this", "subject", ",", "partially", "due", "to", "the", "lack", "of", "large", "-", "scale", "faceted", "summarization", "datasets", ".", "in", "this", "study", ",", "we", "present", "facetsum", ",", "a", "faceted", "summarization", "benchmark", "built", "on", "emerald", "journal", "articles", ",", "covering", "a", "diverse", "range", "of", "domains", ".", "different", "from", "traditional", "document", "-", "summary", "pairs", ",", "facetsum", "provides", "multiple", "summaries", ",", "each", "targeted", "at", "specific", "sections", "of", "a", "long", "document", ",", "including", "the", "purpose", ",", "method", ",", "findings", ",", "and", "value", ".", "analyses", "and", "empirical", "results", "on", "our", "dataset", "reveal", "the", "importance", "of", "bringing", "structure", "into", "summaries", ".", "we", "believe", "facetsum", "will", "spur", "further", "advances", "in", "summarization", "research", "and", "foster", "the", "development", "of", "nlp", "systems", "that", "can", "leverage", "the", "structured", "information", "in", "both", "long", "texts", "and", "summaries", "."]}, {"venue": "ACL", "title": "Reading Turn by Turn: Hierarchical Attention Architecture for Spoken Dialogue Comprehension", "abstract": "Comprehending multi-turn spoken conversations is an emerging research area, presenting challenges different from reading comprehension of passages due to the interactive nature of information exchange from at least two speakers. 
Unlike passages, where sentences are often the default semantic modeling unit, in multi-turn conversations, a turn is a topically coherent unit embodied with immediately relevant context, making it a linguistically intuitive segment for computationally modeling verbal interactions. Therefore, in this work, we propose a hierarchical attention neural network architecture, combining turn-level and word-level attention mechanisms, to improve spoken dialogue comprehension performance. Experiments are conducted on a multi-turn conversation dataset, where nurses inquire and discuss symptom information with patients. We empirically show that the proposed approach outperforms standard attention baselines, achieves more efficient learning outcomes, and is more robust to lengthy and out-of-distribution test samples.", "doc_id": "b1392e79e960afcd16e06dfcd3f4a5db", "publication_year": 2019, "sentences": ["comprehending multi - turn spoken conversations is an emerging research area , presenting challenges different from reading comprehension of passages due to the interactive nature of information exchange from at least two speakers .", "unlike passages , where sentences are often the default semantic modeling unit , in multi - turn conversations , a turn is a topically coherent unit embodied with immediately relevant context , making it a linguistically intuitive segment for computationally modeling verbal interactions .", "therefore , in this work , we propose a hierarchical attention neural network architecture , combining turn - level and word - level attention mechanisms , to improve spoken dialogue comprehension performance .", "experiments are conducted on a multi - turn conversation dataset , where nurses inquire and discuss symptom information with patients .", "we empirically show that the proposed approach outperforms standard attention baselines , achieves more efficient learning outcomes , and is more robust to lengthy and out - of - distribution test samples ."], 
"events": [{"event_type": "ITT", "arguments": [{"text": "comprehending multi - turn spoken conversations", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["comprehending", "multi", "-", "turn", "spoken", "conversations"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "emerging research area", "tokens": ["emerging", "research", "area"], "offsets": [8, 9, 10]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [84]}, {"text": "hierarchical attention neural network architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hierarchical", "attention", "neural", "network", "architecture"], "offsets": [87, 88, 89, 90, 91]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [85]}}, {"event_type": "MDS", "arguments": [{"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [105]}, {"text": "turn - level attention mechanisms", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["turn", "-", "level", "attention", "mechanisms"], "offsets": [94, 95, 96, 101, 102]}, {"text": "word - level attention mechanisms", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["word", "-", "level", "attention", "mechanisms"], "offsets": [98, 99, 100, 101, 102]}], "trigger": {"text": "combining", "tokens": ["combining"], "offsets": [93]}}, {"event_type": "PUR", "arguments": [{"text": "spoken dialogue comprehension performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["spoken", "dialogue", "comprehension", "performance"], "offsets": [106, 107, 108, 109]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [105]}}, {"event_type": "WKS", "arguments": [{"text": "experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["experiments"], "offsets": [111]}, {"text": "multi - turn conversation dataset", "nugget_type": "DST", "argument_type": 
"Dataset", "tokens": ["multi", "-", "turn", "conversation", "dataset"], "offsets": [116, 117, 118, 119, 120]}], "trigger": {"text": "conducted", "tokens": ["conducted"], "offsets": [113]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [132]}, {"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [139]}, {"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [144]}, {"text": "more robust", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "robust"], "offsets": [152, 153]}], "trigger": {"text": "empirically show", "tokens": ["empirically", "show"], "offsets": [133, 134]}}, {"event_type": "CMP", "arguments": [{"text": "hierarchical attention neural network architecture", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["hierarchical", "attention", "neural", "network", "architecture"], "offsets": [87, 88, 89, 90, 91]}, {"text": "standard attention baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["standard", "attention", "baselines"], "offsets": [140, 141, 142]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [139]}}, {"event_type": "CMP", "arguments": [{"text": "hierarchical attention neural network architecture", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["hierarchical", "attention", "neural", "network", "architecture"], "offsets": [87, 88, 89, 90, 91]}, {"text": "standard attention baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["standard", "attention", "baselines"], "offsets": [140, 141, 142]}, {"text": "more efficient", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "efficient"], "offsets": [145, 146]}, {"text": "learning outcomes", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["learning", "outcomes"], "offsets": [147, 
148]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [144]}}, {"event_type": "CMP", "arguments": [{"text": "hierarchical attention neural network architecture", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["hierarchical", "attention", "neural", "network", "architecture"], "offsets": [87, 88, 89, 90, 91]}, {"text": "standard attention baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["standard", "attention", "baselines"], "offsets": [140, 141, 142]}, {"text": "lengthy and out - of - distribution test samples", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["lengthy", "and", "out", "-", "of", "-", "distribution", "test", "samples"], "offsets": [155, 156, 157, 158, 159, 160, 161, 162, 163]}], "trigger": {"text": "more robust", "tokens": ["more", "robust"], "offsets": [152, 153]}}], "document": ["comprehending", "multi", "-", "turn", "spoken", "conversations", "is", "an", "emerging", "research", "area", ",", "presenting", "challenges", "different", "from", "reading", "comprehension", "of", "passages", "due", "to", "the", "interactive", "nature", "of", "information", "exchange", "from", "at", "least", "two", "speakers", ".", "unlike", "passages", ",", "where", "sentences", "are", "often", "the", "default", "semantic", "modeling", "unit", ",", "in", "multi", "-", "turn", "conversations", ",", "a", "turn", "is", "a", "topically", "coherent", "unit", "embodied", "with", "immediately", "relevant", "context", ",", "making", "it", "a", "linguistically", "intuitive", "segment", "for", "computationally", "modeling", "verbal", "interactions", ".", "therefore", ",", "in", "this", "work", ",", "we", "propose", "a", "hierarchical", "attention", "neural", "network", "architecture", ",", "combining", "turn", "-", "level", "and", "word", "-", "level", "attention", "mechanisms", ",", "to", "improve", "spoken", "dialogue", "comprehension", "performance", ".", "experiments", "are", "conducted", "on", "a", "multi", "-", 
"turn", "conversation", "dataset", ",", "where", "nurses", "inquire", "and", "discuss", "symptom", "information", "with", "patients", ".", "we", "empirically", "show", "that", "the", "proposed", "approach", "outperforms", "standard", "attention", "baselines", ",", "achieves", "more", "efficient", "learning", "outcomes", ",", "and", "is", "more", "robust", "to", "lengthy", "and", "out", "-", "of", "-", "distribution", "test", "samples", "."]}, {"venue": "ACL", "title": "Pre-training to Match for Unified Low-shot Relation Extraction", "abstract": "Low-shot relation extraction (RE) aims to recognize novel relations with very few or even no samples, which is critical in real scenario application. Few-shot and zero-shot RE are two representative low-shot RE tasks, which seem to be with similar target but require totally different underlying abilities. In this paper, we propose Multi-Choice Matching Networks to unify low-shot relation extraction. To fill in the gap between zero-shot and few-shot RE, we propose the triplet-paraphrase meta-training, which leverages triplet paraphrase to pre-train zero-shot label matching ability and uses meta-learning paradigm to learn few-shot instance summarizing ability. 
Experimental results on three different low-shot RE tasks show that the proposed method outperforms strong baselines by a large margin, and achieve the best performance on few-shot RE leaderboard.", "doc_id": "5766c9940300f9e098af003546246113", "publication_year": 2022, "sentences": ["low - shot relation extraction ( re ) aims to recognize novel relations with very few or even no samples , which is critical in real scenario application .", "few - shot and zero - shot re are two representative low - shot re tasks , which seem to be with similar target but require totally different underlying abilities .", "in this paper , we propose multi - choice matching networks to unify low - shot relation extraction .", "to fill in the gap between zero - shot and few - shot re , we propose the triplet - paraphrase meta - training , which leverages triplet paraphrase to pre - train zero - shot label matching ability and uses meta - learning paradigm to learn few - shot instance summarizing ability .", "experimental results on three different low - shot re tasks show that the proposed method outperforms strong baselines by a large margin , and achieve the best performance on few - shot re leaderboard ."], "events": [{"event_type": "ITT", "arguments": [{"text": "low - shot relation extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["low", "-", "shot", "relation", "extraction"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "recognize", "tokens": ["recognize"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "different underlying abilities", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["different", "underlying", "abilities"], "offsets": [56, 57, 58]}, {"text": "few - shot and zero - shot re", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["few", "-", "shot", "and", "zero", "-", "shot", "relation", "extraction"], "offsets": [29, 30, 31, 32, 33, 34, 35, 3, 4]}], "trigger": {"text": "require", "tokens": ["require"], 
"offsets": [54]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [64]}, {"text": "multi - choice matching networks", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "choice", "matching", "networks"], "offsets": [66, 67, 68, 69, 70]}, {"text": "unify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["unify"], "offsets": [72]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [65]}}, {"event_type": "PUR", "arguments": [{"text": "low - shot relation extraction", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["low", "-", "shot", "relation", "extraction"], "offsets": [73, 74, 75, 76, 77]}], "trigger": {"text": "unify", "tokens": ["unify"], "offsets": [72]}}, {"event_type": "PRP", "arguments": [{"text": "triplet - paraphrase meta - training", "nugget_type": "APP", "argument_type": "Content", "tokens": ["triplet", "-", "paraphrase", "meta", "-", "training"], "offsets": [97, 98, 99, 100, 101, 102]}, {"text": "fill", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["fill"], "offsets": [80]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [94]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [95]}}, {"event_type": "PUR", "arguments": [{"text": "gap", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["gap"], "offsets": [83]}, {"text": "between zero - shot re", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "zero", "-", "shot", "relation", "extraction"], "offsets": [84, 85, 86, 87, 3, 4]}, {"text": "few - shot re", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["few", "-", "shot", "relation", "extraction"], "offsets": [89, 90, 91, 3, 4]}], "trigger": {"text": "fill", "tokens": ["fill"], "offsets": [80]}}, {"event_type": "MDS", "arguments": [{"text": "zero - shot label matching ability", "nugget_type": 
"TAK", "argument_type": "Target", "tokens": ["zero", "-", "shot", "label", "matching", "ability"], "offsets": [112, 113, 114, 115, 116, 117]}, {"text": "triplet paraphrase", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["triplet", "paraphrase"], "offsets": [106, 107]}], "trigger": {"text": "pre - train", "tokens": ["pre", "-", "train"], "offsets": [109, 110, 111]}}, {"event_type": "MDS", "arguments": [{"text": "meta - learning paradigm", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["meta", "-", "learning", "paradigm"], "offsets": [120, 121, 122, 123]}, {"text": "few - shot instance summarizing ability", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["few", "-", "shot", "instance", "summarizing", "ability"], "offsets": [126, 127, 128, 129, 130, 131]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [125]}}, {"event_type": "CMP", "arguments": [{"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [149, 150]}, {"text": "on three different low - shot re tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "three", "different", "low", "-", "shot", "re", "tasks"], "offsets": [135, 136, 137, 138, 139, 140, 141, 142]}, {"text": "multi - choice matching networks", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["multi", "-", "choice", "matching", "networks"], "offsets": [66, 67, 68, 69, 70]}, {"text": "large margin", "nugget_type": "STR", "argument_type": "Result", "tokens": ["large", "margin"], "offsets": [153, 154]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [148]}}, {"event_type": "FAC", "arguments": [{"text": "on few - shot re leaderboard", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "few", "-", "shot", "re", "leaderboard"], "offsets": [161, 162, 163, 164, 165, 166]}, {"text": "best performance", "nugget_type": "STR", "argument_type": 
"Object", "tokens": ["best", "performance"], "offsets": [159, 160]}, {"text": "multi - choice matching networks", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multi", "-", "choice", "matching", "networks"], "offsets": [66, 67, 68, 69, 70]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [157]}}], "document": ["low", "-", "shot", "relation", "extraction", "(", "re", ")", "aims", "to", "recognize", "novel", "relations", "with", "very", "few", "or", "even", "no", "samples", ",", "which", "is", "critical", "in", "real", "scenario", "application", ".", "few", "-", "shot", "and", "zero", "-", "shot", "re", "are", "two", "representative", "low", "-", "shot", "re", "tasks", ",", "which", "seem", "to", "be", "with", "similar", "target", "but", "require", "totally", "different", "underlying", "abilities", ".", "in", "this", "paper", ",", "we", "propose", "multi", "-", "choice", "matching", "networks", "to", "unify", "low", "-", "shot", "relation", "extraction", ".", "to", "fill", "in", "the", "gap", "between", "zero", "-", "shot", "and", "few", "-", "shot", "re", ",", "we", "propose", "the", "triplet", "-", "paraphrase", "meta", "-", "training", ",", "which", "leverages", "triplet", "paraphrase", "to", "pre", "-", "train", "zero", "-", "shot", "label", "matching", "ability", "and", "uses", "meta", "-", "learning", "paradigm", "to", "learn", "few", "-", "shot", "instance", "summarizing", "ability", ".", "experimental", "results", "on", "three", "different", "low", "-", "shot", "re", "tasks", "show", "that", "the", "proposed", "method", "outperforms", "strong", "baselines", "by", "a", "large", "margin", ",", "and", "achieve", "the", "best", "performance", "on", "few", "-", "shot", "re", "leaderboard", "."]}, {"venue": "ACL", "title": "Training Adaptive Computation for Open-Domain Question Answering with Computational Constraints", "abstract": "Adaptive Computation (AC) has been shown to be effective in improving the efficiency of Open-Domain 
Question Answering (ODQA) systems. However, the current AC approaches require tuning of all model parameters, and training state-of-the-art ODQA models requires significant computational resources that may not be available for most researchers. We propose Adaptive Passage Encoder, an AC method that can be applied to an existing ODQA model and can be trained efficiently on a single GPU. It keeps the parameters of the base ODQA model fixed, but it overrides the default layer-by-layer computation of the encoder with an AC policy that is trained to optimise the computational efficiency of the model. Our experimental results show that our method improves upon a state-of-the-art model on two datasets, and is also more accurate than previous AC methods due to the stronger base ODQA model. All source code and datasets are available at https://github.com/uclnlp/APE.", "doc_id": "d35bb7ae293ae6ec646e166441cf474c", "publication_year": 2021, "sentences": ["adaptive computation ( ac ) has been shown to be effective in improving the efficiency of open - domain question answering ( odqa ) systems .", "however , the current ac approaches require tuning of all model parameters , and training state - of - the - art odqa models requires significant computational resources that may not be available for most researchers .", "we propose adaptive passage encoder , an ac method that can be applied to an existing odqa model and can be trained efficiently on a single gpu .", "it keeps the parameters of the base odqa model fixed , but it overrides the default layer - by - layer computation of the encoder with an ac policy that is trained to optimise the computational efficiency of the model .", "our experimental results show that our method improves upon a state - of - the - art model on two datasets , and is also more accurate than previous ac methods due to the stronger base odqa model .", "all source code and datasets are available at https : / / github . 
com / uclnlp / ape ."], "events": [{"event_type": "ITT", "arguments": [{"text": "adaptive computation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["adaptive", "computation"], "offsets": [0, 1]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "significant computational resources", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["significant", "computational", "resources"], "offsets": [51, 52, 53]}, {"text": "not be available", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "be", "available"], "offsets": [56, 57, 58]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [50]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [63]}, {"text": "adaptive passage encoder", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["adaptive", "passage", "encoder"], "offsets": [65, 66, 67]}, {"text": "applied", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["applied"], "offsets": [75]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [64]}}, {"event_type": "PUR", "arguments": [{"text": "existing odqa model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["existing", "odqa", "model"], "offsets": [78, 79, 80]}], "trigger": {"text": "applied", "tokens": ["applied"], "offsets": [75]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [63]}, {"text": "adaptive passage encoder", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["adaptive", "passage", "encoder"], "offsets": [65, 66, 67]}, {"text": "trained efficiently", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["trained", "efficiently"], "offsets": [84, 85]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [64]}}, {"event_type": "PUR", "arguments": 
[{"text": "on a single gpu", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "single", "gpu"], "offsets": [86, 87, 88, 89]}], "trigger": {"text": "trained efficiently", "tokens": ["trained", "efficiently"], "offsets": [84, 85]}}, {"event_type": "WKS", "arguments": [{"text": "parameters of the base odqa model fixed", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["parameters", "of", "the", "base", "odqa", "model", "fixed"], "offsets": [94, 95, 96, 97, 98, 99, 100]}], "trigger": {"text": "keeps", "tokens": ["keeps"], "offsets": [92]}}, {"event_type": "WKS", "arguments": [{"text": "default layer - by - layer computation of the encoder", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["default", "layer", "-", "by", "-", "layer", "computation", "of", "the", "encoder"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113, 114, 115]}, {"text": "with an ac policy that is trained to optimise the computational efficiency of the model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "an", "adaptive", "computation", "policy", "that", "is", "trained", "to", "optimise", "the", "computational", "efficiency", "of", "the", "model"], "offsets": [116, 117, 0, 1, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130]}], "trigger": {"text": "overrides", "tokens": ["overrides"], "offsets": [104]}}, {"event_type": "FIN", "arguments": [{"text": "improves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improves"], "offsets": [139]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [135]}}, {"event_type": "CMP", "arguments": [{"text": "adaptive passage encoder", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["adaptive", "passage", "encoder"], "offsets": [65, 66, 67]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [139]}, {"text": "state - of - the - art model", "nugget_type": "APP", "argument_type": "Arg2", 
"tokens": ["state", "-", "of", "-", "the", "-", "art", "model"], "offsets": [142, 143, 144, 145, 146, 147, 148, 149]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [139]}}, {"event_type": "FIN", "arguments": [{"text": "accurate", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["accurate"], "offsets": [158]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [135]}}, {"event_type": "CMP", "arguments": [{"text": "adaptive passage encoder", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["adaptive", "passage", "encoder"], "offsets": [65, 66, 67]}, {"text": "previous ac methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "ac", "methods"], "offsets": [160, 161, 162]}, {"text": "more", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more"], "offsets": [157]}], "trigger": {"text": "accurate", "tokens": ["accurate"], "offsets": [158]}}], "document": ["adaptive", "computation", "(", "ac", ")", "has", "been", "shown", "to", "be", "effective", "in", "improving", "the", "efficiency", "of", "open", "-", "domain", "question", "answering", "(", "odqa", ")", "systems", ".", "however", ",", "the", "current", "ac", "approaches", "require", "tuning", "of", "all", "model", "parameters", ",", "and", "training", "state", "-", "of", "-", "the", "-", "art", "odqa", "models", "requires", "significant", "computational", "resources", "that", "may", "not", "be", "available", "for", "most", "researchers", ".", "we", "propose", "adaptive", "passage", "encoder", ",", "an", "ac", "method", "that", "can", "be", "applied", "to", "an", "existing", "odqa", "model", "and", "can", "be", "trained", "efficiently", "on", "a", "single", "gpu", ".", "it", "keeps", "the", "parameters", "of", "the", "base", "odqa", "model", "fixed", ",", "but", "it", "overrides", "the", "default", "layer", "-", "by", "-", "layer", "computation", "of", "the", "encoder", "with", "an", "ac", "policy", "that", "is", "trained", "to", 
"optimise", "the", "computational", "efficiency", "of", "the", "model", ".", "our", "experimental", "results", "show", "that", "our", "method", "improves", "upon", "a", "state", "-", "of", "-", "the", "-", "art", "model", "on", "two", "datasets", ",", "and", "is", "also", "more", "accurate", "than", "previous", "ac", "methods", "due", "to", "the", "stronger", "base", "odqa", "model", ".", "all", "source", "code", "and", "datasets", "are", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "uclnlp", "/", "ape", "."]}, {"venue": "ACL", "title": "R4C: A Benchmark for Evaluating RC Systems to Get the Right Answer for the Right Reason", "abstract": "Recent studies have revealed that reading comprehension (RC) systems learn to exploit annotation artifacts and other biases in current datasets. This prevents the community from reliably measuring the progress of RC systems. To address this issue, we introduce R4C, a new task for evaluating RC systems\u2019 internal reasoning. R4C requires giving not only answers but also derivations: explanations that justify predicted answers. We present a reliable, crowdsourced framework for scalably annotating RC datasets with derivations. We create and publicly release the R4C dataset, the first, quality-assured dataset consisting of 4.6k questions, each of which is annotated with 3 reference derivations (i.e. 13.8k derivations). 
Experiments show that our automatic evaluation metrics using multiple reference derivations are reliable, and that R4C assesses different skills from an existing benchmark.", "doc_id": "ad78a2550e55d534d191fd728981f13e", "publication_year": 2020, "sentences": ["recent studies have revealed that reading comprehension ( rc ) systems learn to exploit annotation artifacts and other biases in current datasets .", "this prevents the community from reliably measuring the progress of rc systems .", "to address this issue , we introduce r4c , a new task for evaluating rc systems \u2019 internal reasoning .", "r4c requires giving not only answers but also derivations : explanations that justify predicted answers .", "we present a reliable , crowdsourced framework for scalably annotating rc datasets with derivations .", "we create and publicly release the r4c dataset , the first , quality - assured dataset consisting of 4 . 6k questions , each of which is annotated with 3 reference derivations ( i . e . 13 . 
8k derivations ) .", "experiments show that our automatic evaluation metrics using multiple reference derivations are reliable , and that r4c assesses different skills from an existing benchmark ."], "events": [{"event_type": "ITT", "arguments": [{"text": "reading comprehension ( rc ) systems", "nugget_type": "APP", "argument_type": "Target", "tokens": ["reading", "comprehension", "systems"], "offsets": [5, 6, 10]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [11]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [41]}, {"text": "r4c", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["r4c"], "offsets": [43]}, {"text": "evaluating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluating"], "offsets": [49]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [42]}}, {"event_type": "PUR", "arguments": [{"text": "rc systems \u2019 internal reasoning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["rc", "systems", "\u2019", "internal", "reasoning"], "offsets": [50, 51, 52, 53, 54]}], "trigger": {"text": "evaluating", "tokens": ["evaluating"], "offsets": [49]}}, {"event_type": "MDS", "arguments": [{"text": "answers", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["answers"], "offsets": [61]}, {"text": "explanations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["explanations"], "offsets": [66]}, {"text": "that justify predicted answers", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["that", "justify", "predicted", "answers"], "offsets": [67, 68, 69, 70]}], "trigger": {"text": "giving", "tokens": ["giving"], "offsets": [58]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [72]}, {"text": "reliable , crowdsourced framework", "nugget_type": "APP", "argument_type": 
"Content", "tokens": ["reliable", ",", "crowdsourced", "framework"], "offsets": [75, 76, 77, 78]}, {"text": "annotating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["annotating"], "offsets": [81]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [73]}}, {"event_type": "PUR", "arguments": [{"text": "rc datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["reading", "comprehension", "datasets"], "offsets": [5, 6, 83]}, {"text": "with derivations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "derivations"], "offsets": [84, 85]}], "trigger": {"text": "annotating", "tokens": ["annotating"], "offsets": [81]}}, {"event_type": "FAC", "arguments": [{"text": "from an existing benchmark", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "an", "existing", "benchmark"], "offsets": [150, 151, 152, 153]}, {"text": "r4c", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["r4c"], "offsets": [146]}, {"text": "different skills", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["different", "skills"], "offsets": [148, 149]}], "trigger": {"text": "assesses", "tokens": ["assesses"], "offsets": [147]}}, {"event_type": "RWF", "arguments": [{"text": "prevents", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["prevents"], "offsets": [24]}, {"text": "community", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["community"], "offsets": [26]}], "trigger": {"text": "prevents", "tokens": ["prevents"], "offsets": [24]}}, {"event_type": "PUR", "arguments": [{"text": "progress of rc systems", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["progress", "of", "reading", "comprehension", "systems"], "offsets": [31, 32, 5, 6, 34]}], "trigger": {"text": "reliably measuring", "tokens": ["reliably", "measuring"], "offsets": [28, 29]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": 
["we"], "offsets": [87]}, {"text": "r4c dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["r4c", "dataset"], "offsets": [93, 94]}], "trigger": {"text": "create and publicly release", "tokens": ["create", "and", "publicly", "release"], "offsets": [88, 89, 90, 91]}}, {"event_type": "FIN", "arguments": [{"text": "assesses", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["assesses"], "offsets": [147]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [131]}}, {"event_type": "FAC", "arguments": [{"text": "automatic evaluation metrics", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["automatic", "evaluation", "metrics"], "offsets": [134, 135, 136]}, {"text": "using multiple reference derivations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "multiple", "reference", "derivations"], "offsets": [137, 138, 139, 140]}], "trigger": {"text": "reliable", "tokens": ["reliable"], "offsets": [142]}}], "document": ["recent", "studies", "have", "revealed", "that", "reading", "comprehension", "(", "rc", ")", "systems", "learn", "to", "exploit", "annotation", "artifacts", "and", "other", "biases", "in", "current", "datasets", ".", "this", "prevents", "the", "community", "from", "reliably", "measuring", "the", "progress", "of", "rc", "systems", ".", "to", "address", "this", "issue", ",", "we", "introduce", "r4c", ",", "a", "new", "task", "for", "evaluating", "rc", "systems", "\u2019", "internal", "reasoning", ".", "r4c", "requires", "giving", "not", "only", "answers", "but", "also", "derivations", ":", "explanations", "that", "justify", "predicted", "answers", ".", "we", "present", "a", "reliable", ",", "crowdsourced", "framework", "for", "scalably", "annotating", "rc", "datasets", "with", "derivations", ".", "we", "create", "and", "publicly", "release", "the", "r4c", "dataset", ",", "the", "first", ",", "quality", "-", "assured", "dataset", "consisting", "of", "4", ".", "6k", "questions", ",", 
"each", "of", "which", "is", "annotated", "with", "3", "reference", "derivations", "(", "i", ".", "e", ".", "13", ".", "8k", "derivations", ")", ".", "experiments", "show", "that", "our", "automatic", "evaluation", "metrics", "using", "multiple", "reference", "derivations", "are", "reliable", ",", "and", "that", "r4c", "assesses", "different", "skills", "from", "an", "existing", "benchmark", "."]}]