| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:05:00.260792Z" |
| }, |
| "title": "Automatic Term Extraction from Newspaper Corpora: Making the Most of Specificity and Common Features", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Drouin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "succ. Centre-ville Montr\u00e9al (Qu\u00e9bec", |
| "location": { |
| "postCode": "H3C 3J7", |
| "country": "CANADA" |
| } |
| }, |
| "email": "patrick.drouin@umontreal.ca" |
| }, |
| { |
| "first": "Jean-Beno\u00eet", |
| "middle": [], |
| "last": "Morel", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "succ. Centre-ville Montr\u00e9al (Qu\u00e9bec", |
| "location": { |
| "postCode": "H3C 3J7", |
| "country": "CANADA" |
| } |
| }, |
| "email": "jean-benoit.morel@umontreal.ca" |
| }, |
| { |
| "first": "Marie-Claude", |
| "middle": [], |
| "last": "L'homme", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "succ. Centre-ville Montr\u00e9al (Qu\u00e9bec", |
| "location": { |
| "postCode": "H3C 3J7", |
| "country": "CANADA" |
| } |
| }, |
| "email": "mc.lhomme@umontreal.ca" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The first step of any terminological work is to setup a reliable, specialized corpus composed of documents written by specialists and then to apply automatic term extraction (ATE) methods to this corpus in order to retrieve a first list of potential terms. In this paper, the experiment we describe differs from this usual process. The corpus used for this study was built from newspaper articles retrieved from the Web using a short list of keywords. The general intuition on which this research is based is that ATE based corpus comparison techniques can be used to capture both similarities and dissimilarities between corpora. The former are exploited through a termhood measure and the latter through word embeddings. Our initial results were validated manually and show that combining a traditional ATE method that focuses on dissimilarities between corpora to newer methods that exploit similarities (more specifically distributional features of candidates) leads to promising results.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The first step of any terminological work is to setup a reliable, specialized corpus composed of documents written by specialists and then to apply automatic term extraction (ATE) methods to this corpus in order to retrieve a first list of potential terms. In this paper, the experiment we describe differs from this usual process. The corpus used for this study was built from newspaper articles retrieved from the Web using a short list of keywords. The general intuition on which this research is based is that ATE based corpus comparison techniques can be used to capture both similarities and dissimilarities between corpora. The former are exploited through a termhood measure and the latter through word embeddings. Our initial results were validated manually and show that combining a traditional ATE method that focuses on dissimilarities between corpora to newer methods that exploit similarities (more specifically distributional features of candidates) leads to promising results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The first step of any terminological work is to setup a reliable, specialized corpus composed of documents written by specialists. It is usually assumed that only domain-specific corpora compiled according to criteria defined by terminologists can represent good starting points for terminological description. This is especially true when relying on automatic term extraction (ATE) tools as the quality of the output is in direct relation to the quality of the input. However, these \"ideal\" requirements are not always met in certain fields of knowledge. This is the case of the domain explored in this work, i.e. problematic behavior in the workplace. Its terminology can be disseminated in various forms of textual genres, including unspecialized corpora. Extracting terminology from unspecialized corpora raises new challenges for ATE since most tools and methodologies are built around the assumption that the corpora being processed are specialized. Tools and methodologies thus tend to target features specific to this type of corpora. One efficient strategy for spotting domain-specific terms consists in comparing the behavior of the lexicon of a specialized corpus (an analysis corpus, AC) to the behavior of the lexicon in a general language corpus (a reference corpus, RC), thus exploiting the difference between text genres. Such a technique has proved efficient for extracting relevant and interesting term candidates. One question remains however: Can we expect this comparison method to yield similar results when comparing corpora that belong to the same genre or when using an analysis corpus that is unspecialized? We believe that, although still useful, the method would need to be complemented with further processing. This paper presents an automatic term extraction experiment carried out on a newspaper corpus that contains texts that address directly or indirectly the topic of discrimination. 
We first explore the results of a hybrid corpus comparison ATE experiment and propose new techniques in order to increase the precision of the results obtained. We believe that the proposed approach is useful to tackle ATE from unspecialized corpora and that the underlying ideas can be used for ATE in other situations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "For the project described in this paper, we have been working with a private company (Valital 1 ) whose core business is the real-time online analysis of job candidates behavior and the automated confirmation of their work experience. Their process digs into various sources of information with the aim of defining a textual profile for different kinds of misconduct in the workplace. Among these sources, are newspaper articles dealing with problematic behavior (e.g. violence, discrimination), but most articles do not concern the workplace as such. One of the tasks assigned to our team was to capture the terminological profile for each of these behaviors. This terminological profile was to be implemented in an ontology at a later stage. From a terminological standpoint, newspaper articles are \"atypical\" textual sources since they are targeted at the general public. Even if these articles were automatically filtered according to the topic they address based on a short list of keywords, they may or may not concern the workplace as such. In other words, articles can report on a discrimination situation, but this situation could have taken place anywhere. The challenge in this case was to be able to locate relevant terms in an unspecialized corpus. Our task involved an additional less typical aspect. The terminology related to misconduct includes various types of terms such as verbs (e.g. discriminate), adjectives (e.g. discriminatory) or single-word predicative nouns (e.g. discrimination). The term extraction method needed to be able to identify single-word terms and terms that belong to different parts of speech.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The task", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Different methods were devised to identify terminology and such methods are now well-established and used for different applications (Indurkhya and Damerau, 2010) . Automatic term extraction (ATE) methods are usually categorized as linguistic, statistical or hybrid. The first techniques rely on linguistic descriptions (grammars, dictionaries, surface patterns), while statistical methods rely on information like frequency and co-occurrence, etc. In the last 20 years, most tools use both statistical and linguistic information and fall into the hybrid category. The tools try to evaluate how interesting items extracted are for terminologists, leading to various methods for calculating termhood (Kageura and Umino, 1996) . Among the three traditional categories, hybrid methods were evaluated as those that led to better results (Macken et al., 2013) . But in the last few years, the research field of ATE has undergone profound changes. Progress in machine learning and more specifically in deep learning has lead to methodologies which cannot be easily described using the three traditional categories (Rigouts-Terryn et al., 2020) . In this work, we will explore a traditional hybrid method that compares compora and combine it with more recent techniques such as word embeddings (Mikolov et al., 2013; Amjadian et al., 2016; Kucza et al., 2018; Qasemizadeh and Handschuh, 2014; Pollak et al., 2019) . Our work is similar to (H\u00e4tty et al., 2019) as far as the method is concerned. However, our aim is to identify terms in unspecialized corpora. Given this, we cannot only target changes in meaning or reduction of number of attested meanings in a specialized corpus when compared to a general one. We take the opposite approach and attempt to spot potential meaning similarities to remove candidates that would be very similar regardless of the corpora. 
An efficient method for ATE consists of comparing a domain-specific corpus (an analysis corpus, AC) to a general one (a reference corpus, RC) and computing a specificity score for lemmas. For instance, a corpus of English texts dealing with the topic of climate change can be compared to a general balanced corpus such as the British National Corpus. This method was implemented in TermoStat described in (Drouin, 2003) . It was evaluated for the extraction of single-word terms with satisfactory results (Lemay et al., 2005) and supports multiple languages 2 . The concept of \"specificity\" aims to capture the potential of term candidates to behave like terms (termhood). In most cases, termhood is linked to a higher than expected frequency in a specialized corpus based on a theoretical frequency computed from a general corpus. Various statistical measures can be used to compute specificity. When comparing corpora of different genres, terms ranking high retrieved from the AC usually correspond to terms. When the analysis corpus is less specialized (even if its content is topic-specific), it is to be expected that the strong opposition between corpora is lost. We can no longer focus on the single assumption that there is a high level of divergence in the way words behave in the AC and the RC as 2 http://termostat.ling.umontreal.ca, (Drouin, 2020) in (H\u00e4tty et al., 2019) . This work addresses this problem and suggests a method that could still make the most of the terminological content of the AC even if it belongs to a text genre that is the same or very similar to that of the RC.", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 162, |
| "text": "(Indurkhya and Damerau, 2010)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 699, |
| "end": 724, |
| "text": "(Kageura and Umino, 1996)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 833, |
| "end": 854, |
| "text": "(Macken et al., 2013)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1108, |
| "end": 1137, |
| "text": "(Rigouts-Terryn et al., 2020)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1287, |
| "end": 1309, |
| "text": "(Mikolov et al., 2013;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1310, |
| "end": 1332, |
| "text": "Amjadian et al., 2016;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1333, |
| "end": 1352, |
| "text": "Kucza et al., 2018;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1353, |
| "end": 1385, |
| "text": "Qasemizadeh and Handschuh, 2014;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1386, |
| "end": 1406, |
| "text": "Pollak et al., 2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1432, |
| "end": 1452, |
| "text": "(H\u00e4tty et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 2266, |
| "end": 2280, |
| "text": "(Drouin, 2003)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 2366, |
| "end": 2386, |
| "text": "(Lemay et al., 2005)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 3206, |
| "end": 3220, |
| "text": "(Drouin, 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 3224, |
| "end": 3244, |
| "text": "(H\u00e4tty et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3." |
| }, |
| { |
| "text": "In this paper, we are dealing with corpora that belong to the same genre even though one of the corpora covers a broader spectrum of topics. Our hypotheses are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hypotheses", |
| "sec_num": "4." |
| }, |
| { |
| "text": "\u2022 A traditional approach to ATE based on frequency comparison can still be used to locate relevant terminology. In other words, the dissimilarity between the topics of the two corpora can still be exploited by an automatic term extraction method (Hypothesis 1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hypotheses", |
| "sec_num": "4." |
| }, |
| { |
| "text": "\u2022 However, given the fact that textual genres are quite similar, it is likely that a number of tokens will need to be filtered (probably more that usual). One strategy consists in using some of the features shared by both corpora to further refine term extraction. We can exploit the fact that some words have a similar behavior in the two corpora and use this feature to filter out the results obtained by simple corpus comparison. This method is likely to increase precision. However, in order to capture this behavior, we need to go beyond frequency measures and model semantic features in some way, e.g. using distributional information and word embeddings. Thus, the similarities between the corpora are also useful and can be exploited with distributional analysis and word embeddings (Hypothesis 2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hypotheses", |
| "sec_num": "4." |
| }, |
| { |
| "text": "The main idea behind (1) is that, since our AC is limited to one topic, specificity can be used to retrieve term candidates (TC). In contrast, since both the AC and the RC are comparable from a text genre point of view, in (2) we want to capture the fact that some items that might be retrieved by the specificity carry meanings that do not contrast sharply with the ones they convey in general language corpora. In order to do so, we will compare word embeddings built from our AC and freely available prebuilt embeddings. This comparison will be used to filter out the results obtained based on (1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hypotheses", |
| "sec_num": "4." |
| }, |
| { |
| "text": "The overall process is illustrated in Figure 1 . The regular approach to term extraction when comparing corpora is represented by the stages in light blue. The analysis and reference corpora are preprocessed; term extraction is performed using specificity scores; finally, term candidates are ranked according to the score they obtained. We are adding a layer (steps in light yellow) designed to compare word embeddings in order to re-rank the output produced by steps in blue.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 38, |
| "end": 46, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "5." |
| }, |
| { |
| "text": "Basic preprocessing is applied to both the AC and the RC. All files from the corpora are tokenized, then tagged and lemmatized using TreeTagger (Schmid, 1994) . The Treetagger format is used as a common input for subsequent tasks. ", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 158, |
| "text": "(Schmid, 1994)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpora", |
| "sec_num": "5.1." |
| }, |
| { |
| "text": "The corpus that was built by our partner comprises several text documents dealing with unwanted behavior from potential employees: Addiction, Discrimination, Fraud, Harassment, and Violence. It is important to mention that all files in the corpus were retrieved automatically from the web based on a short list of keywords related to each of these topics. All files come from online Canadian English newspapers and have been preprocessed to remove HTML markup. Since the crawling process was keyword based, the various corpora are noisy and thus do not lend themselves easily to standard term extraction process. In this work, we will focus solely on the Discrimination corpus as work on this topic is more advanced than for the other topics. The corpus contains 1,541,987 tokens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ". Analysis Corpus", |
| "sec_num": null |
| }, |
| { |
| "text": "The reference corpus used was built from subsets of two large corpora: the British National Corpus (BNC) (Consortium, 2007) and the American National Corpus (ANC) (Reppen et al., 2005) . We extracted 4M tokens from each of these corpora in order to compile our 8M tokens reference corpus. In both cases, only newpaper texts were retrieved.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 123, |
| "text": "(Consortium, 2007)", |
| "ref_id": null |
| }, |
| { |
| "start": 163, |
| "end": 184, |
| "text": "(Reppen et al., 2005)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reference Corpus", |
| "sec_num": "5.1.2." |
| }, |
| { |
| "text": "The extraction process was limited to single-word lexical items including nouns, verbs and adjectives, since, as was mentioned above, important concepts in this field can be expressed with terms that belong to different parts of speech. TermoStat computes a Specificity score to represent how far the frequency in the specialized corpus deviates from a theoretical frequency. Its calculation relies on an approximation of the binomial distribution using standard normal distribution. In order to do so, a measure proposed by Lafon (1980) is used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Term Extraction", |
| "sec_num": "5.2." |
| }, |
| { |
| "text": "Using values from Table 1 , specificity can be calculated as follows:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 18, |
| "end": 25, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Term Extraction", |
| "sec_num": "5.2." |
| }, |
| { |
| "text": "log P(X=b) = log (a+b)! + log (N-(a+b))! + log (b+d)! + log (N-(b+d))! -log N! -log b! -log ((a+b)-b)! -log ((b+d)-b)! -log (N-(a+b)-(b+d)+b)!", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Term Extraction", |
| "sec_num": "5.2." |
| }, |
| { |
| "text": "This measure was tested in previous studies (Lemay et al., 2005 , 2008; Drouin et al., 2018) and leads to excellent results for the extraction of both single-word and multiword terms. Specificity can be used to spot items that are both over-and under-represented in a corpus. In the case of terminology, a domain-and genre-oriented lexicon, we are solely interested in positive specificities which highlight items that are over-represented in the AC.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 63, |
| "text": "(Lemay et al., 2005", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 64, |
| "end": 71, |
| "text": ", 2008;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 72, |
| "end": 92, |
| "text": "Drouin et al., 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Term Extraction", |
| "sec_num": "5.2." |
| }, |
| { |
| "text": "Since the specificity scores cannot be represented on a predefined scale, for the current experiment, we expressed them on a scale ranging from 0 to 1 where the max specificity score is mapped to 1. This mapping, which does not impact the overall distribution of scores, leads to a less granular representation of the scores and a more flexible set of scores to assess. The specificity score is used to test hypothesis 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Term Extraction", |
| "sec_num": "5.2." |
| }, |
| { |
| "text": "To build embeddings for our AC, we used the word2vec (Mikolov et al., 2013) implementation included in Gensim (\u0158eh\u016f\u0159ek and Sojka, 2010). We used default values for the skipgram algorithm with a window of 5 words, a minimum frequency threshold of 5 and 300 dimensions for the vectors.", |
| "cite_spans": [ |
| { |
| "start": 53, |
| "end": 75, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computed Word Embeddings", |
| "sec_num": "5.3.1." |
| }, |
| { |
| "text": "To compare the behavior of tokens in a large unspecialized language corpus, we used the pre-trained word GloVe embeddings (Pennington et al., 2014) . More specifically, we used the Common Crawl embeddings built from 42B tokens with a 1.9M vocabulary (uncased) and. The embeddings' vectors have 300 dimensions.", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 147, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-trained Word Embeddings", |
| "sec_num": "5.3.2." |
| }, |
| { |
| "text": "Since our embeddings and the GloVe embeddings are built from different corpora and we want to be able to compare the vectors for words in both of them, the embeddings must be aligned. In order to do this, we used the technique proposed by (Hamilton et al., 2016) based on the code provided by Tyler Angert and available from his GitHub 3 . Such an approach is been used in (H\u00e4tty et al., 2019) to compare vectors between corpora for term extraction. During the alignment process, only the shared vocabulary 4 between embeddings is kept.", |
| "cite_spans": [ |
| { |
| "start": 239, |
| "end": 262, |
| "text": "(Hamilton et al., 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 373, |
| "end": 393, |
| "text": "(H\u00e4tty et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alignment of Word Embeddings", |
| "sec_num": "5.3.3." |
| }, |
| { |
| "text": "Words with similar behaviors in a large unspecialized corpus (Glove embeddings in our case) and our AC (our corpus built embeddings) are assumed to carry the same meanings based on the distributional features/patterns captured by the embeddings. From this idea we can use a simple cosine distance to compare vectors. Similar vectors will lead to cosine distance closer to 0 and dissimilar vectors to values closer to 1. We represent the distance using a score called GloveDist. What is of interest to us is to lower the Specificity score for TCs whose distributional behavior is the same in both corpora. The rationale behind this strategy is that even though the Specificity score seems to indicate that TCs are valid terms, their overall meaning is the same. We thus factor this information in a new score called DistSpecificity which is used to test our hypothesis 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DistSpecificity", |
| "sec_num": "5.4." |
| }, |
| { |
| "text": "Using this score, the Specificity score of a very specific TC that has almost the same distributional behavior in both corpora will be closer to 0 (since GloveDist will tend towards 0). On the other hand, a dissimilar behavior in both corpora will not impact Specificity as such (since GloveDist will have a value closer to 1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DistSpecificity = GloveDist*Specificity", |
| "sec_num": null |
| }, |
| { |
| "text": "All results were manually validated by a terminologist who has been involved in the project from the start. For the purpose of the current experiment, we are mainly interested in the potential of our score to rank valid terms at the top of the list of term candidates. Our manual validation was limited to the first 250 TCs retrieved using each of our three scores (Specificity, GloveDist and DistSpecificity) ranked from the highest to the lowest value. We thus validated a total of 750 TCs. As can be seen in Table 3 , some TCs could appear in two or three lists. The criteria used for the validation of TC were the following:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 511, |
| "end": 518, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Validation", |
| "sec_num": "5.5." |
| }, |
| { |
| "text": "1. Terms must appear in contexts that are meaningful according to our task;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Validation", |
| "sec_num": "5.5." |
| }, |
| { |
| "text": "2. Terms must appear in at least 10 knowledge-rich contexts (KRC) (Meyer, 2001 ) related to discrimination;", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 78, |
| "text": "(Meyer, 2001", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Validation", |
| "sec_num": "5.5." |
| }, |
| { |
| "text": "3. TCs can also be considered terms if they hold syntagmatic or paradigmatic relations (e.g., as synonymy, antonymy or argumental) with already validated terms. (L'Homme, 2020).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Validation", |
| "sec_num": "5.5." |
| }, |
| { |
| "text": "What we define as a meaningful context (Criteria 1) is a context in which a misconduct is described. Even though some TCs could appear in an important number of contexts, we selected to base our study on KRCs only (Criteria 2). This methodological decision makes our validation process more challenging but our results more interesting.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Validation", |
| "sec_num": "5.5." |
| }, |
| { |
| "text": "The following sentence is a good example of a KRC for TCs such as race or religion: In New York State, we have no tolerance for discrimination on the basis of race, religion, sex, national origin, disability, sexual orientation or perceived sexual orientation. KRCs provide insights on how TCs can be linked to each other in a specific domain. In this KRC, it shows us how race and religion can be linked to discrimination (also a TC) in our domain. In addition to meeting the above-mentioned criteria, some TCs were also validated according to Criteria 3. For example, anti-discriminatory was labelled as a term on the basis of being an antonym for discriminatory; woman on the basis of being an argument of verbs such as discriminate or predicative nouns such as discrimination. Both TCs meet the other criteria as well. The validation process was challenging due to the fact that often TCs did not convey a very technical meaning in the AC, i.e. a meaning that one could easily distinguish from general usage. Our approach was to consider TCs that were relevant according to the topic of discrimination and this \"relevance\" was constantly refined as we skimmed through the list of candidates. TCs that met the previous criteria were labelled as Term;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Validation", |
| "sec_num": "5.5." |
| }, |
| { |
| "text": "TCs that did not meet these criteria as Non-Term; and TCs that we had doubts about as Uncertain (see Tables 5 to 7) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 101, |
| "end": 115, |
| "text": "Tables 5 to 7)", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Validation", |
| "sec_num": "5.5." |
| }, |
| { |
| "text": "As can be seen from the precision values in Table 2 , ATE on unspecialized corpora is not a trivial process. We provide two precision measures for each score. Precision 1 is obtained by dividing the total number of valid TCs by 250 (the total in our lists) while Precision 2 corresponds to the number of valid TCs evaluated on the set of TC that we could validate (ignoring the TCs classified as Uncertain from the calculation). Values obtained by both measures are quite low, but not to the point of making the ATE extraction useless. Recall was not evaluated for this experiment since we do not have a gold standard that can be used and a manual evaluation of recall on newspaper corpora does not serve a larger purpose for the time being. The main issue with a task like the one we describe in this paper is still reaching acceptable precision values. Table 2 : Precision values for all 3 scores", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 44, |
| "end": 51, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 855, |
| "end": 862, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6." |
| }, |
| { |
| "text": "Since we are more interested in the potential of each score to rank the valid information at the top of the list presented to the terminologist, we can evaluate precision at each position in the TC lists. This information is provided in Figure 2 which shows the precision values obtained by the three scores (Specificity, GloveDist and DistSpecificity) over the whole list. For these scores, entries identified as Uncertain were considered as errors, we are thus using the Precision 1 . We can easily see that GloveDist does not perform as expected. This means that using solely distributional information from a large unspecialized corpus as captured by GloVe embeddings and comparing them to our local vectors is not sufficient in itself. The distance between the vectors does not allow us to distinguish Terms from Non-Terms. Specificity presents a somewhat stable curve which means that valid TCs are distributed evenly along the list of 250 TCs. These results show that Specificity remains an interesting score to identify potential terms in unspecialized corpora by comparing them to larger unspecialized corpus. On the other hand, Figure 2 shows that it is not the best score to maximize valid TCs at the top of list. As mentioned earlier, our DistSpecificity score combines both Specificity and GloveDist, the idea being to lower the importance of TCs that have a high Specificity but a similar behavior in both our corpora and the corpus used to build the GloVe embeddings. Figure 2 shows that this seems to be the case as precision values for DistSpecificity are higher for an important part of the list of TCs (until we reach candidate 165). Table 6 shows some of the unique contributions of the scores. Once again in this context we can oberve the influence of the nature of the corpus on the TCs retained: advertisement, robot, view, request, verify, etc.. 
Such TCs were again more present in the first TCs proposed by the Specificity score which means that DistSpecificity was, to some extent, succesfull in re-ranking them. Table 7 contains the TCs that were most positively affected by the re-ranking. Although some results can be explained by the content of the documents that make up the corpus Table 7 : Top positive re-ranking of Specificity by Dist-Specificity (university, department), some are quite puzzling (try, be) and need to be investigated further. Since the AC is made of newspaper articles, academics who study the phenomenon of discrimination are often quoted and it explains the strong presence of the former in our corpus. However, it does not explain why their distributional features are so different in the two corpora. This will also be subjected to further investigation. At the other end of the spectrum are the results contained in Table 8 which include the TCs that have been negatively re-ranked by DistSpecificity. As we mentioned earlier, the good news is that this score is able to capture the fact that some TCs that are more closely related to the Web than the subject matter of the corpus and lower their termhood lowered (click, promotion, etc.). Nonetheless, some valid terms are being affected quite strongly while they should not be (advocate, orientation, bias, bar). In some cases (bar, hate, orientation), it seems that polysemy can be a factor affecting the quality of the results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 237, |
| "end": 246, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1139, |
| "end": 1147, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1484, |
| "end": 1492, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1654, |
| "end": 1661, |
| "text": "Table 6", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 2040, |
| "end": 2047, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 2214, |
| "end": 2221, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 2775, |
| "end": 2782, |
| "text": "Table 8", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Score", |
| "sec_num": null |
| }, |
| { |
| "text": "All experiment results were evaluated by a single terminologist and limited to the first 250 TCs provided by each score. Working with a larger sample and a team of validators would allow us to test inter-annotator agreement over a larger sample. For the current task we limited our investigation to singleword TCs. We believe our findings could be applied to multiword TCs in order to see if we can corroborate the results obtained here. In order to do so, we would need to conduct an experiment using word embeddings that can capture distributional information from multiword TCs. Relying on more recent (and more complex) embeddings algorithms would also help to capture contexts in which TCs are used and perhaps mitigate the effects of polysemy observed in our results. An interesting extension of the method presented here would be to apply it to other genres of unspecialized corpora such as texts retrieved from social media. Some social platforms such as Twitter and Reddit host communities of specialists. These specialists exchange knowledge in informal settings and the terms carrying this knowledge should be described.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Future Work", |
| "sec_num": "7." |
| }, |
| { |
| "text": "Provided that our results can be replicated in larger settings, integrating our method into the compilation process of terminological resources and into our term extraction tool would be beneficial.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Future Work", |
| "sec_num": "7." |
| }, |
| { |
| "text": "In this paper we proposed a method for extracting terminology from unspecialized corpora. Our first hypothesis was that traditional corpus comparison techniques could be used in such a task in order to capture the dissimilarity between the topics of the two corpora. We have verified that this is the case and that the results of such a technique could still be used in terminology work although they are noisy. Our second hypothesis was that the similarities between the corpora are also useful and can be exploited with distributional analysis and word embeddings. To test our second hypothesis we devised a new way to re-rank TCs provided by a classic corpus comparison method in an effort to compare distributional features of TCs in our unspecialized corpus to those observed in a general language corpus. Using this technique leads to very good results, as far as we could tell from the first part of a list of candidate terms. For terminologists, this method would allow them to focus on more relevant terms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8." |
| }, |
| { |
| "text": "https://www.valital.com", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://gist.github.com/tangert/106822a0f56f8308db3f1d77be2c7942 4 By shared vocabulary, we mean words that are common to both embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://ivado.ca/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors would like to thank Valital for working with our team on this project and for the corpora they provided as well as IVADO (Institut de valorisation des donn\u00e9es) 5 for making this possible through its financial support.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "9." |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Local-Global Vectors to Improve Unigram Terminology Extraction", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Amjadian", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Paribakht", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Faez", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 5th International Workshop on Computational Terminology", |
| "volume": "", |
| "issue": "", |
| "pages": "2--11", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amjadian, E., Inkpen, D., Paribakht, T., and Faez, F. (2016). Local-Global Vectors to Improve Unigram Ter- minology Extraction. In Proceedings of the 5th Interna- tional Workshop on Computational Terminology, pages 2-11, Osaka, Japan.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Quantifying termhood through corpus comparison", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Drouin", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Doll", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Terminology and Knowledge Engineering (TKE-2008)", |
| "volume": "", |
| "issue": "", |
| "pages": "191--206", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Drouin, P. and Doll, F. (2008). Quantifying termhood through corpus comparison. In Terminology and Knowl- edge Engineering (TKE-2008), pages 191-206, Copen- hague, Danemark, Ao\u00fbt. Copenhagen Business School, Copenhagen, Copenhagen Business School, Copen- hagen.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "\u00c9valuation du potentiel terminologique de candidats termes", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Drouin", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Langlais", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Actes des 8es Journ\u00e9es internationales d'Analyse statistique des Donn\u00e9es Textuelles. (JADT 2006)", |
| "volume": "", |
| "issue": "", |
| "pages": "389--400", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Drouin, P. and Langlais, P. (2006).\u00c9valuation du po- tentiel terminologique de candidats termes. In Actes des 8es Journ\u00e9es internationales d'Analyse statistique des Donn\u00e9es Textuelles. (JADT 2006), pages 389-400, Besan\u00e7on, France.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Lexical profiling of environmental corpora", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Drouin", |
| "suffix": "" |
| }, |
| { |
| "first": "M.-C", |
| "middle": [], |
| "last": "L'homme", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Robichaud", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "3419--3425", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Drouin, P., L'Homme, M.-C., and Robichaud, B. (2018). Lexical profiling of environmental corpora. In Nicoletta Calzolari (Conference chair), et al., editors, Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pages 3419- 3425, Paris, France, May. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Term extraction using non-technical corpora as a point of leverage", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Drouin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Terminology", |
| "volume": "9", |
| "issue": "1", |
| "pages": "99--115", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Drouin, P. (2003). Term extraction using non-technical corpora as a point of leverage. Terminology, 9(1):99- 115.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Termhood experiments: quantifying the relevance of candidate terms", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Drouin", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Modern Approaches to Terminological Theories and Applications", |
| "volume": "36", |
| "issue": "", |
| "pages": "375--391", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Drouin, P. (2006). Termhood experiments: quantifying the relevance of candidate terms. Modern Approaches to Terminological Theories and Applications, 36:375-391.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Diachronic Word Embeddings Reveal Statistical Laws of Semantic Change", |
| "authors": [], |
| "year": null, |
| "venue": "Proc. Assoc. Comput. Ling. (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diachronic Word Embeddings Reveal Statistical Laws of Semantic Change. In Proc. Assoc. Comput. Ling. (ACL).", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "SURel: A gold standard for incorporating meaning shifts into term extraction", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "H\u00e4tty", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Schlechtweg", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Schulte Im Walde", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (*SEM 2019)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H\u00e4tty, A., Schlechtweg, D., and Schulte im Walde, S. (2019). SURel: A gold standard for incorporating mean- ing shifts into term extraction. In Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (*SEM 2019), pages 1-8, Minneapolis, Min- nesota, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Handbook of Natural Language Processing, Second Edition. Chapman & Hall/CRC machine learning & pattern recognition series", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Indurkhya", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Damerau", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Indurkhya, N. and Damerau, F. (2010). Handbook of Nat- ural Language Processing, Second Edition. Chapman & Hall/CRC machine learning & pattern recognition series. CRC Press.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Methods of automatic term recognition", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Kageura", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Umino", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Terminology", |
| "volume": "3", |
| "issue": "2", |
| "pages": "259--289", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kageura, K. and Umino, B. (1996). Methods of automatic term recognition. Terminology, 3(2):259-289.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Term Extraction via Neural Sequence Labeling a Comparative Evaluation of Strategies Using Recurrent Neural Networks", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kucza", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Niehues", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Zenkel", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Waibel", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "St\u00fcker", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "2072--2076", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kucza, M., Niehues, J., Zenkel, T., Waibel, A., and St\u00fcker, S. (2018). Term Extraction via Neural Sequence Label- ing a Comparative Evaluation of Strategies Using Recur- rent Neural Networks. In Interspeech 2018, pages 2072- 2076, Hyderabad, India, September. ISCA.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Two methods for extracting specific single-word terms from specialized corpora: Experimentation and evaluation", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Lemay", |
| "suffix": "" |
| }, |
| { |
| "first": "M.-C", |
| "middle": [], |
| "last": "L'homme", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Drouin", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "International Journal of Corpus Linguistics", |
| "volume": "10", |
| "issue": "2", |
| "pages": "227--255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lemay, C., L'Homme, M.-C., and Drouin, P. (2005). Two methods for extracting specific single-word terms from specialized corpora: Experimentation and evaluation. International Journal of Corpus Linguistics, 10(2):227- 255.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Lexical Semantics for Terminology: An introduction", |
| "authors": [ |
| { |
| "first": "M.-C", |
| "middle": [], |
| "last": "L'homme", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L'Homme, M.-C. (2020). Lexical Semantics for Terminol- ogy: An introduction. John Benjamins.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "TExSIS: Bilingual Terminology Extraction from Parallel Corpora Using Chunk-based Alignment", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Macken", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Lefever", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Terminology", |
| "volume": "19", |
| "issue": "1", |
| "pages": "1--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Macken, L., Lefever, E., and Hoste, V. (2013). TExSIS: Bilingual Terminology Extraction from Parallel Corpora Using Chunk-based Alignment. Terminology, 19(1):1- 30.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Extracting knowledge-rich contexts for terminography", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Meyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Recent advances in computational terminology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meyer, I. (2001). Extracting knowledge-rich contexts for terminography. In Didier Bourigault, et al., editors, Re- cent advances in computational terminology, page 279. John Benjamins.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "26", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikolov, T., Sutskever, I., Chen, K., Corrado, G. S., and Dean, J. (2013). Distributed representations of words and phrases and their compositionality. In C. J. C. Burges, et al., editors, Advances in Neural Information Processing Systems 26, pages 3111-3119. Curran Asso- ciates, Inc.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pennington, J., Socher, R., and Manning, C. D. (2014). Glove: Global vectors for word representation. In Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Karst Exploration: Extracting Terms and Definitions from Karst Domain Corpus", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Pollak", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Repar", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Martinc", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Podpe\u010dan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of eLex 2019", |
| "volume": "", |
| "issue": "", |
| "pages": "934--956", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pollak, S., Repar, A., Martinc, M., and Podpe\u010dan, V. (2019). Karst Exploration: Extracting Terms and Def- initions from Karst Domain Corpus. In Proceedings of eLex 2019, pages 934-956, Sintra, Portugal.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Investigating Context Parameters in Technology Term Recognition", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Qasemizadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Handschuh", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of SADAATL 2014", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qasemizadeh, B. and Handschuh, S. (2014). Investigating Context Parameters in Technology Term Recognition. In Proceedings of SADAATL 2014, pages 1-10, Dublin, Ire- land.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Software Framework for Topic Modelling with Large Corpora", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Reh\u016f\u0159ek", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Sojka", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the LREC 2010 Workshop on New Challenges for NLP Frameworks", |
| "volume": "", |
| "issue": "", |
| "pages": "45--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reh\u016f\u0159ek, R. and Sojka, P. (2010). Software Frame- work for Topic Modelling with Large Corpora. In Proceedings of the LREC 2010 Workshop on New Challenges for NLP Frameworks, pages 45-50, Val- letta, Malta, May. ELRA. http://is.muni.cz/ publication/884893/en.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Termeval 2020: Shared task on automatic term extraction using the annotated corpora for term extraction research (acter) dataset", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Rigouts-Terryn", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Drouin", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Lefever", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of Computerm 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rigouts-Terryn, A., Drouin, P., Hoste, V., and Lefever, E. (2020). Termeval 2020: Shared task on automatic term extraction using the annotated corpora for term extrac- tion research (acter) dataset. In Proceedings of Comput- erm 2020.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Probabilistic part-of-speech tagging using decision trees", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Schmid", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "International Conference on New Methods in Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "44--49", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Schmid, H. (1994). Probabilistic part-of-speech tagging using decision trees. In International Conference on New Methods in Language Processing, pages 44-49, Manchester, UK.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "British National Corpus, version 3 BNC XML edition. British National Corpus Consortium", |
| "authors": [], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Language Resource References BNC Consortium. (2007). British National Corpus, ver- sion 3 BNC XML edition. British National Corpus Con- sortium, ISLRN 143-765-223-127-3.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "American National Corpus (ANC) Second Release. Linguistic Data Consortium", |
| "authors": [ |
| { |
| "first": "Randi", |
| "middle": [], |
| "last": "Reppen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nancy", |
| "middle": [], |
| "last": "Ide", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Suderman", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reppen, Randi and Ide, Nancy and Suderman, Keith. (2005). American National Corpus (ANC) Second Re- lease. Linguistic Data Consortium, ISLRN 797-978- 576-065-6.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "Overview of the process 5.1.1", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "Overall precision of the scores", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "num": null, |
| "text": "", |
| "content": "<table><tr><td colspan=\"3\">details the contributions of each score. It shows that</td></tr><tr><td colspan=\"3\">they share 60 common terms while bringing unique con-</td></tr><tr><td colspan=\"3\">tributions to the overall list of TCs. However, Specificity</td></tr><tr><td colspan=\"2\">locates more valid terms than GloveDist.</td><td/></tr><tr><td/><td colspan=\"2\">Specificity DistSpecificity</td></tr><tr><td>Common</td><td>60</td><td>60</td></tr><tr><td>Unique</td><td>84</td><td>75</td></tr><tr><td>Total</td><td>144</td><td>135</td></tr></table>", |
| "html": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "num": null, |
| "text": "Overall Contribution of Scores for Valid Terms", |
| "content": "<table><tr><td/><td colspan=\"2\">Specificity DistSpecificity</td></tr><tr><td>Common</td><td>50</td><td>50</td></tr><tr><td>Unique</td><td>47</td><td>47</td></tr><tr><td>Total</td><td>97</td><td>97</td></tr></table>", |
| "html": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "num": null, |
| "text": "Contribution of Scores for Valid Terms < 165Nearly a third (30%) of the top 165 candidates are common to both scores, the top 15 can be seen inTable 5. One can clearly see by looking at the Non-Terms that the nature of the corpus had an inpact on the results. For example, items", |
| "content": "<table><tr><td>Spec</td><td>Status</td><td colspan=\"2\">DistSpecificity Status</td></tr><tr><td>ms</td><td colspan=\"2\">Non-Term ms</td><td>Non-Term</td></tr><tr><td>law</td><td>Term</td><td>read</td><td>Non-Term</td></tr><tr><td>woman</td><td>Term</td><td>employee</td><td>Term</td></tr><tr><td colspan=\"3\">newsletter Non-Term white</td><td>Term</td></tr><tr><td>lawsuit</td><td>Term</td><td>state</td><td>Term</td></tr><tr><td>story</td><td colspan=\"2\">Non-Term law</td><td>Term</td></tr><tr><td>court</td><td>Term</td><td>file</td><td>Term</td></tr><tr><td colspan=\"2\">employee Term</td><td>hide</td><td>Non-Term</td></tr><tr><td>subscribe</td><td colspan=\"2\">Non-Term court</td><td>Term</td></tr><tr><td>photo</td><td colspan=\"2\">Non-Term bill</td><td>Term</td></tr><tr><td>read</td><td colspan=\"2\">Non-Term case</td><td>Term</td></tr><tr><td>state</td><td>Term</td><td>lawyer</td><td>Term</td></tr><tr><td>case</td><td>Term</td><td>justice</td><td>Term</td></tr><tr><td>lawyer</td><td>Term</td><td>complaint</td><td>Term</td></tr><tr><td>plaintiff</td><td>Term</td><td>religion</td><td>Term</td></tr></table>", |
| "html": null |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "num": null, |
| "text": "Top common TCs such as ms, newsletter, subscribe, hide can be attributed to the fact that the corpus was built from Web pages. The results at the top of the list for DistSpecificity are much better and contain terms relevant to the task at hand.", |
| "content": "<table><tr><td>Spec</td><td>Status</td><td colspan=\"2\">DistSpecificity Status</td></tr><tr><td colspan=\"2\">discrimination Term</td><td>dismissal</td><td>Term</td></tr><tr><td>gender</td><td>Term</td><td>argument</td><td>Term</td></tr><tr><td>percent</td><td colspan=\"2\">Non-Term argue</td><td>Term</td></tr><tr><td>update</td><td colspan=\"2\">Non-Term politics</td><td>Term</td></tr><tr><td colspan=\"3\">advertisement Non-Term contend</td><td>Term</td></tr><tr><td>transgender</td><td>Term</td><td>person</td><td>uncertain</td></tr><tr><td>discriminate</td><td>Term</td><td>epithet</td><td>Term</td></tr><tr><td>right</td><td>Term</td><td>man</td><td>uncertain</td></tr><tr><td>emails</td><td colspan=\"2\">Non-Term retaliate</td><td>Term</td></tr><tr><td>program</td><td colspan=\"2\">Non-Term advertiser</td><td>Non-Term</td></tr><tr><td>verify</td><td colspan=\"2\">Non-Term caste</td><td>Term</td></tr><tr><td>robot</td><td colspan=\"2\">Non-Term city</td><td>Non-Term</td></tr><tr><td>minority</td><td>Term</td><td>engage</td><td>Term</td></tr><tr><td>sex</td><td>Term</td><td>request</td><td>Non-Term</td></tr><tr><td>disability</td><td>Term</td><td>resign</td><td>Term</td></tr><tr><td>hire</td><td>Term</td><td>asylum</td><td>Non-Term</td></tr><tr><td>racism</td><td>Term</td><td>noose</td><td>Term</td></tr><tr><td>ruling</td><td>Term</td><td>dissent</td><td>Non-Term</td></tr><tr><td>view</td><td colspan=\"2\">Non-Term analyze</td><td>Non-Term</td></tr><tr><td colspan=\"2\">neighborhood uncertain</td><td>officer</td><td>Non-Term</td></tr></table>", |
| "html": null |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "num": null, |
| "text": "Top unique TCs", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF9": { |
| "type_str": "table", |
| "num": null, |
| "text": "Top negative re-ranking of Specificity by Dist-Specificity", |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |