| { |
| "paper_id": "W03-0305", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T06:08:57.984305Z" |
| }, |
| "title": "Reducing Parameter Space for Word Alignment", |
| "authors": [ |
| { |
| "first": "Herve", |
| "middle": [], |
| "last": "Dejean", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chemin de Maupertuis", |
| "location": { |
| "postCode": "38240", |
| "settlement": "Meylan", |
| "country": "France" |
| } |
| }, |
| "email": "hdejean@xrce.xerox.com" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Gaussier", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chemin de Maupertuis", |
| "location": { |
| "postCode": "38240", |
| "settlement": "Meylan", |
| "country": "France" |
| } |
| }, |
| "email": "gaussier@xrce.xerox.com" |
| }, |
| { |
| "first": "Cyril", |
| "middle": [], |
| "last": "Goutte", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chemin de Maupertuis", |
| "location": { |
| "postCode": "38240", |
| "settlement": "Meylan", |
| "country": "France" |
| } |
| }, |
| "email": "cgoutte@xrce.xerox.com" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Yamada", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chemin de Maupertuis", |
| "location": { |
| "postCode": "38240", |
| "settlement": "Meylan", |
| "country": "France" |
| } |
| }, |
| "email": "kyamada@xrce.xerox.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper presents the experimental results of our attempts to reduce the size of the parameter space in word alignment algorithm. We use IBM Model 4 as a baseline. In order to reduce the parameter space, we pre-processed the training corpus using a word lemmatizer and a bilingual term extraction algorithm. Using these additional components, we obtained an improvement in the alignment error rate.", |
| "pdf_parse": { |
| "paper_id": "W03-0305", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper presents the experimental results of our attempts to reduce the size of the parameter space in word alignment algorithm. We use IBM Model 4 as a baseline. In order to reduce the parameter space, we pre-processed the training corpus using a word lemmatizer and a bilingual term extraction algorithm. Using these additional components, we obtained an improvement in the alignment error rate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "We participated in the workshop shared task for English-French and Romanian-English word alignment. We use IBM Model 4 as a baseline. The number of parameters in this model roughly scales as the product of the vocabulary sizes (ie number of types) in the source and target languages. In order to obtain better alignment performance, we wish to investigate techniques that may reduce the number of parameters, therefore increasing the data-to-parameter ratio. For that purpose, we preprocessed the training corpus using a word lemmatizer and a bilingual lexicon extraction algorithm. Section 2 briefly describes the base alignment algorithm, Section 3 describes our additional components, and Section 4 shows our experimental results, followed by Discussion and Conclusion in Section 5 and 6, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We use IBM Model 4 (Brown et al., 1993) as a basis for our word alignment system. The model was implemented in a public software package GIZA++ (Och and Ney, 2000) . We use default parameters provided with the package, namely, it was bootstrapped from Model 1 (five iterations), HMM model (five iterations) Model 3 (two iterations) and Model 4 (four iterations).", |
| "cite_spans": [ |
| { |
| "start": 19, |
| "end": 39, |
| "text": "(Brown et al., 1993)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 144, |
| "end": 163, |
| "text": "(Och and Ney, 2000)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Alignment algorithm", |
| "sec_num": "2" |
| }, |
| { |
| "text": "IBM Model 4 is a conditional generative model, which generates an English sentence (and a word alignment) given a foreign sentence (French or Romanian, in our experiments here). In the generative process, each English word ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Alignment algorithm", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u00a5 !\" # \u00a5 \u00a7 \u00a2 $ % ' & ( \u00a5 \u00a7 ) 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Alignment algorithm", |
| "sec_num": "2" |
| }, |
| { |
| "text": ", which is conditioned on the word classes \" # \u00a5 \u00a2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Alignment algorithm", |
| "sec_num": "2" |
| }, |
| { |
| "text": ". In GIZA++, the word classes are automatically detected by a bilingual clustering algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "& ( \u00a5 \u00a7 )", |
| "sec_num": null |
| }, |
| { |
| "text": "The translation table \u00a5 \u00a2 dominates the parameter space when the vocabulary size grows. In this paper, we focus on how to reduce the table size for \u00a5 \u00a2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "& ( \u00a5 \u00a7 )", |
| "sec_num": null |
| }, |
| { |
| "text": ". We apply two additional methods, lemmatization and bilingual lexicon extraction, described below. We expect two advantages by reducing the model parameter space. One is to reduce the memory usage, which allows us to use more training data. Another is to improve the data-to-parameter ratio, and therefore the accuracy of the alignment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "& ( \u00a5 \u00a7 )", |
| "sec_num": null |
| }, |
| { |
| "text": "To reduce the model parameter space, we apply the following two methods. One is a rule-based word lemmatizer and another is a statistical lexical extraction algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reducing the Parameter Space", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We use a word lemmatizer program (XRCE, 2003) which converts words in variant forms into the root forms. We preprocess the training and the test corpora with the lemmatizer. Figure 1 and 2 show examples of how the lemmatizer works. it would have been easy to say that these sanctions have to be followed rather than making them voluntary . it would have be easy to say that these sanction have to be follow rather than make them voluntary . il aurait\u00e9t\u00e9 facile de dire que il faut appliquer ces sanctions\u00e0 le lieu de les rendre facultatives . il avoir\u00eatre facile de dire que il falloir appliquer ce sanction\u00e0 le lieu de le rendre facultatif . this is being done to ensure that our children will receive a pension under the cpp . this be be do to ensure that we child will receive a pension under the cpp . cela permettra\u00e0 nos enfants de pouvoir b\u00e9n\u00e9ficier de le r\u00e9gime de pensions de le canada . cela permettre\u00e0 notre enfant de pouvoir b\u00e9n\u00e9ficier de le r\u00e9gime de pension de le canada . Applying the lemmatizer reduces the parameter space for the alignment algorithm by reducing the vocabulary size. Nouns (and adjectives for French) with different gender and number forms are grouped into the same word. Verbs with different tenses (present, past, etc.) and aspects (-ing, -ed, etc.) are mapped to the same root word. In particular, French verbs have many different conjugations: Some verb variants appear only once or twice in a corpus, and the statistics for those rare words are unreliable. Thus, we expect to improve the model accuracy by treating those variants as the same word.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 45, |
| "text": "(XRCE, 2003)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 174, |
| "end": 182, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Word Lemmatizer", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "On the other hand, there is a danger that lemmatization may lose useful information provided by the inflected form of a word. In particular, special words such as do and be may have different usage patterns for each variant (e.g., done vs. doing). In that case, lemmatization may actually hurt the performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Lemmatizer", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Another additional component we use is a bilingual lexicon extraction algorithm. We run the algorithm over the same training data, and obtain a list of word translation pairs. The extracted word-pair list is used as an additional training data for GIZA++. This will give some bias for the alignment model parameters. This does not actually reduce the parameter space, but if the bias is taken to the extreme (e.g. some of the model parameters are fixed to zero), it will reduce the parameter space in effect.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For the bilingual lexicon extraction, we use a word alignment model different from IBM models. The purpose of using a different model is to extract 1-to-1 word translation pairs more reliably. The model (described below) assumes that a translation sentence pair is preprocessed, so that the pair is a sequence of content words. To select content words, we apply a part-of-speech tagger to remove non content words (such as determiners and prepositions). As the model focuses on the alignment of content words, we expect better performance than IBM models for extracting content word translation pairs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We give here a brief description of the bilingual lexi-con extraction method we use. This method takes as input a parallel corpus, and produces a probabilistic bilingual lexicon. Our approach relies on the word-to-word translation lexicon obtained from parallel corpora following the method described in (Hull, 1999) , which is based on the word-to-word alignment presented in (Himstra, 1996) . We first represent co-occurrences between words across translations by a matrix, the rows of which represent the source language words, the columns the target language words, and the elements of the matrix the expected alignment frequencies (EAFs) for the words appearing in the corresponding row and column. Empty words are added in both languages in order to deal with words with no equivalent in the other language.", |
| "cite_spans": [ |
| { |
| "start": 304, |
| "end": 316, |
| "text": "(Hull, 1999)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 377, |
| "end": 392, |
| "text": "(Himstra, 1996)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The estimation of the expected alignment frequency is based on the Iterative Proportional Fitting Procedure (IPFP) presented in (Bishop et al., 1975) . This iterative procedure updates the current estimate \u00a4 \u00a9 1 3 2 5 4 6 8 7", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 149, |
| "text": "(Bishop et al., 1975)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "of the EAF of source word 9 with target word @ , using the following two-stage equations: are the observed frequencies of co-occurrences, obtained by considering each pair of aligned sentences and by incrementing the alignment frequencies accordingly. The sequence of updates will eventually converge and the EAFs are then normalized (by dividing each element \u00a4 6 8 7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "A ) B D C F EG \u00a7 H IP Q R S EB I E P H U T S A ) B D C F V G W EX W H IP \u1ef2 I A B D C F V a G W EX W H I (1) A B D C F EX W H IP Q R S EB I E P H U T S A B D C F EG \u00a7 H IP \u1ef2 P A b B 8 C F EG \u00a7 H c P", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "by the row marginal", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u00a4 6 U d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "), so as to yield probabilistic translation lexicons, in which each source word is associated with a target word through a score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Using 0.01 0.286 0.307 0.382 trilex-er-all-01-2 0.01 (2) 0.281 0.302 trilex-er-all-01-5 0.01 (5) 0.282 0.298 0.374 trilex-er-all-01-10 0.01 (10) 0.283 0.295 trilex-er-all-01-20 0.01 (20) 0.296 0.297 trilex-er-all-01-50 0.01 (50) 0.293 0.300 Table 2 : Romanian-English shared task simple heuristic, based on the best match criterion described in (Gaussier et al., 2000) to align lexical words within sentences. We then count how many times two given words are aligned in such a way, and normalize the counts so as to get our final probabilistic translation lexicon.", |
| "cite_spans": [ |
| { |
| "start": 345, |
| "end": 368, |
| "text": "(Gaussier et al., 2000)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 241, |
| "end": 248, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Bilingual Lexical Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the English-French shared task, we experimented the effect of the word lemmatizer. Table 1 shows the results. 1 Due to our resource constraints, we used only a portion of the corpus provided by the shared task organizer. Most of our English-French experiments were carried out with the half (10 million) or the quarter (5 million) of the training corpus. We ran three different systems (nolem, base, and delem) with some different parameters. The system nolem is a plain GIZA++ program. We only lowercased the training and the test corpus for nolem. In base and delem, the corpus were preprocessed by the lemmatizer. In base system, the lemmatizer was applied blindly, while in delem, only rare words were applied with lemmatization.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 86, |
| "end": 114, |
| "text": "Table 1 shows the results. 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "English-French shared task", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "As seen in Table 1 , applying the lemmatizer blindly (base) hurt the performance. We hypothesized that the lemmatizer hurts more, when the corpus size is bigger. In fact, the Trial AER was better in base-ef-56k than nolem-ef-56k. Then, we tested the performance when we lemmatized only rare words. We used word frequency threshold to decide whether to lemmatize or not. For example, delem-ef-2-280k lemmatized a word if it appeared less than twice in the training corpus. In general, the selective lemmatization (delem-ef-*-280k) works better than complete lemmatization (base-ef-280k). In some thresholds (delem-ef-w 100,1000x -280k), the Test AER was slightly better than no lemmatization (nolemef-280k). However, from this experiment, it is not clear where we should set the threshold. We are now investigating this issue.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 11, |
| "end": 18, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "English-French shared task", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In the Romanian-English shared task, we experimented how the bilingual lexicon extraction method affects the performance. Table 2 shows the results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 122, |
| "end": 129, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Romanian-English shared task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We have three systems nolem, base, and trilex for this task. The first two systems are the same as the English-French shared task, except we use a lemmatizer only for English. 2 The system trilex uses additional bilingual lexicon for training GIZA++. The lexicon was extracted by the algorithm described in 3.2. We tried different thresholds s t v to decide which extracted lexicons are used. It is an estimated word translation probability given by the extraction algorithm. We also tested the effect of duplicating the additional lexicon by 2, 5, 10, or 20 times, to further bias the model parameters.", |
| "cite_spans": [ |
| { |
| "start": 176, |
| "end": 177, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Romanian-English shared task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As our extraction method currently assumes word lemmatization, we only compare trilex results with base systems. As seen in the Table 2 , it performed better when the extracted lexicons were added to the training data (e.g., base-er-all vs. trilex-er-all-01). The lexicon duplication worked best when the duplication was only twice, i.e. duplicating additional lexicon too much hurt the performance. For the threshold s t v , it worked better when it was set lower (i.e., adding more words). Due to the time constraints, we didn't test further lower thresholds.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 128, |
| "end": 135, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Romanian-English shared task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As we expected, the lemmatizer reduced the memory requirement, and improved the word alignment accuracy when it was applied only for infrequent words. The behavior of using different threshold to decide whether to lemmatize or not is unclear, so we are now investigating this issue.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Adding extracted bilingual lexicons to the training data also showed some improvement in the alignment accuracy. Due to our experimental setup, we were unable carry this experiment with selective lemmatization. We are going to try such experiment pretty soon.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We presented our experimental results of the workshop shared task, by using IBM model 4 as a baseline, and by using a word lemmatizer and a bilingual lexicon extraction algorithm as additional components. They showed some improvement over the baseline, and suggest the need for careful parameter settings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In the table, AER stands for Average Error Rate without null-aligned words, and AERn was calculated with null-aligned words. See the workshop shared-task guideline for the definition of AER. Mem is the memory requirement for running GIZA++.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We do not have a Romanian lemmatizer, but we used a partof-speech tagger by Dan Tufis for Romanian to extract bilingual lexicon.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We are grateful to Dan Tufis for a Romanian corpus preprocessed with his Romanian part-of-speech tagger. This research was supported by the European Commission under the TransType2 project no. IST-2001-32091. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgment", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "The Mathematics of Statistical Machine Translation: Parameter Estimation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "Della" |
| ], |
| "last": "Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [ |
| "Della" |
| ], |
| "last": "Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mercer", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brown P, S. Della Pietra, V. Della Pietra, and R. Mercer. 1993. The Mathematics of Statistical Machine Trans- lation: Parameter Estimation. Computational Linguis- tics, 19(2).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Discrete Multivariate Analysis", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Bishop", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Fiendbeg", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Holland", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bishop S. Fiendbeg and P. Holland. 1975. Discrete Mul- tivariate Analysis, MIT-Press.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Parallel Text Processing -Alignment and Use of Translation Corpora", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Gaussier", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hull", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ait-Mokhtar", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gaussier E, D. Hull, and S. Ait-Mokhtar. 2000. Term Alignment in Use: Machine-Aided Human Transla- tion. In J. Veronis, Ed. Parallel Text Processing - Alignment and Use of Translation Corpora. Kluwer Academic Publishers.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Using Statistical Methods to Create a Bilingual Dictionary", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hiemstra", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiemstra D. 1996. Using Statistical Methods to Create a Bilingual Dictionary. Master Thesis. Universiteit Twente.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Automating the construction of bilingual terminology lexicons", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hull", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Terminology", |
| "volume": "", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hull D. 1999. Automating the construction of bilingual terminology lexicons. Terminology, 4(2).", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Improved Statistical Alignment Models", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [ |
| "J" |
| ], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Och F. J. and H. Ney. 2000. Improved Statistical Align- ment Models. ACL-00.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Finite-State Linguistic Components", |
| "authors": [], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xerox Research Centre Europe. 2003. Finite-State Linguistic Components. CA Linguistic Technology: Demos.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Lemmatizer Example 1", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": "Lemmatizer Example 2", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| } |
| } |
| } |
| } |