| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:01:35.133113Z" |
| }, |
| "title": "Evaluation of Pretrained BERT Model by Using Sentence Clustering", |
| "authors": [ |
| { |
| "first": "Naoki", |
| "middle": [], |
| "last": "Shibayama", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ibaraki University", |
| "location": { |
| "addrLine": "Sciences 4-12-1 Nakanarusawa", |
| "postCode": "316-8511", |
| "settlement": "Hitachi", |
| "region": "Ibaraki JAPAN" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ibaraki University", |
| "location": { |
| "addrLine": "Sciences 4-12-1 Nakanarusawa", |
| "postCode": "316-8511", |
| "settlement": "Hitachi", |
| "region": "Ibaraki JAPAN" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ibaraki University", |
| "location": { |
| "addrLine": "Sciences 4-12-1 Nakanarusawa", |
| "postCode": "316-8511", |
| "settlement": "Hitachi", |
| "region": "Ibaraki JAPAN" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Wen", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ibaraki University", |
| "location": { |
| "addrLine": "Sciences 4-12-1 Nakanarusawa", |
| "postCode": "316-8511", |
| "settlement": "Hitachi", |
| "region": "Ibaraki JAPAN" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Shinnou", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ibaraki University", |
| "location": { |
| "addrLine": "Sciences 4-12-1 Nakanarusawa", |
| "postCode": "316-8511", |
| "settlement": "Hitachi", |
| "region": "Ibaraki JAPAN" |
| } |
| }, |
| "email": "hiroyuki.shinnou.0828@vc.ibaraki.ac.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "For evaluation of pre-trained models like bidirectional encoder representations from transformers (BERT), task-based approaches are frequently adopted and there is a possibility that meta parameters for fine-tuning influence results of the evaluations. However, taskbased approaches for languages, except English, have a problem-there is no common dataset for their evaluation. Hence, evaluating pre-trained models for these languages with task-based approaches is challenging. In this work, we evaluate Japanese pre-trained BERT models with CLS token. We input labeled sentences to models, get CLS token embeddings, and calculate scores from in-class and outof-class dispersions, which can be calculated from embeddings and labels of sentences. Experiment results show that a model released by Laboro.AI Inc. is the best Japanese pretrained BERT model. Meanwhile, the results of evaluation with sentence clustering are different from those of evaluations that are based on fill mask task.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "For evaluation of pre-trained models like bidirectional encoder representations from transformers (BERT), task-based approaches are frequently adopted and there is a possibility that meta parameters for fine-tuning influence results of the evaluations. However, taskbased approaches for languages, except English, have a problem-there is no common dataset for their evaluation. Hence, evaluating pre-trained models for these languages with task-based approaches is challenging. In this work, we evaluate Japanese pre-trained BERT models with CLS token. We input labeled sentences to models, get CLS token embeddings, and calculate scores from in-class and outof-class dispersions, which can be calculated from embeddings and labels of sentences. Experiment results show that a model released by Laboro.AI Inc. is the best Japanese pretrained BERT model. Meanwhile, the results of evaluation with sentence clustering are different from those of evaluations that are based on fill mask task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "BERT (Devlin et al., 2019 ) is a high-performance pre-training model. It helped in the improvement of the performance of natural language processing tasks. Generally, task-based approaches were adopted for evaluating pre-training models like BERT. In English language, a dataset for task-based evaluation, such as the general language understanding evaluation (GLUE) (Wang et al., 2018) , can be used, and it is easy to compare models. However, when a pre-trained model is fine-tuned for taskbased evaluation, meta parameters for fine-tuning may influence scores of the model. Hence, taskbased evaluation with fine-tuning has a possibility of biased evaluation. Also, there is no common taskbased dataset for languages except English, so it is challenging to compare pre-trained models for other languages.", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 25, |
| "text": "(Devlin et al., 2019", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 367, |
| "end": 386, |
| "text": "(Wang et al., 2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we evaluate Japanese pre-trained BERT models using CLS token embeddings in outputs of target models. CLS token embedding can be regarded as an input sentence embedding, and models can be rated with evaluating embeddings itself. However, how to evaluate sentence embeddings is also challenging. Here, we use clustering to evaluate sentence embeddings. Also, we prepare sets of sentences sorted by genre and use BERT models to get embeddings of each sentence. Then, we cluster those embeddings and evaluate models with clustering score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The GLUE can be used for English, but there is no common dataset for other languages, so we have to prepare the dataset for evaluation ourselves.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There is a work that compared and evaluated some Japanese pre-trained BERT models. In this work, we evaluated three BERT models using document classification tasks with the Amazon dataset (Shibayama et al., 2019) . However, BERT is a model for sentences, and there is no established method of document classification with BERT. Therefore, whether document classification is the right task to evaluate or not is questionable. We use a sentence as input of BERT and evaluate models using CLS token embeddings, which can be considered as sentence embeddings from outputs of BERT.", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 212, |
| "text": "(Shibayama et al., 2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The approaches for evaluation of embeddings are task-based, but in the case of word embeddings from outputs of some method like word2vec (Mikolov et al., 2013) , there is a viewpoint that embeddings represent the meaning of words. Also, there is a research that evaluated embeddings with correlation of similarities between words calculated from the similarity of embeddings and by hand (Sakaizawa and Komachi, 2016) .", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 159, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 387, |
| "end": 416, |
| "text": "(Sakaizawa and Komachi, 2016)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In Section 2, we mentioned that a task-based approach is frequently adopted to evaluate embeddings. Also, we mentioned that there is a viewpoint that embeddings represent the meaning of words. When this viewpoint is applied to clustering, we can say that a cluster can be represented by a group of embeddings in it. In what follows, we use this to evaluate pre-trained BERT models with sentence clustering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation of BERT", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Embeddings that were outputted from BERT model m, were evaluated by the following 5 steps. Labels for sentences of model m's input were required to do this evaluation. 3. Calculate A m : in-class dispersion of each class from the following expression 1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method of the Evaluation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "A m = N \u2211 i=1 \u03c3 2 i (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method of the Evaluation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method of the Evaluation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u03c3 2 i = \u2211 j\u2208C i ||g (m) i \u2212 x i,j || 2 , C i is class i and N is number of classes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method of the Evaluation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "4. Calculate g (m) : average centroids of all classes and calculate B m : out-of-class dispersion from the following expression. 2", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 18, |
| "text": "(m)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method of the Evaluation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "B m = N \u2211 i=1 ||g (m) \u2212g (m) i || 2 (N = Number of classes)", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Method of the Evaluation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "5. Calculate a degree of separation: M m = Am Bm , and use M m as a score of model m. This score becomes smaller when clustering with model m is performed properly. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method of the Evaluation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We re-evaluated models with a fill mask task in order to verify the results of sentence clustering evaluation. The steps for the re-evaluation are as following:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Re-evaluation by Using Fill Mask Task", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "1. Prepare a dataset-we prepared a dataset that contains sentences and which word to be masked in matching sentence as labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Re-evaluation by Using Fill Mask Task", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "2. Predict masked word with model-we calculated percentages that mask token was the word in matching label which was defined in a dataset from outputs of models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Re-evaluation by Using Fill Mask Task", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "3. Average and comparison-we compared averages of percentages that were calculated in step 2. Detailed information on the abovementioned comparison is in the next subsection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Re-evaluation by Using Fill Mask Task", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Firstly, we explain common setups for evaluation with sentence clustering and fill mask task. We compared six models: a model which was released Inc.(hereafter, Laboro Ver.) 8 . We did not fine-tune models for comparisons. Table 1 summarizes the word tokenizer and pretraining corpus of pre-trained models. Model size of all models are base format of official BERT (Devlin et al., 2019) : 12-layer, 768-hidden, and 12-heads. So sentence vectors we got in the evaluation with sentence clustering have 768 dimentions. Juman++ and MeCab are software for morphological analysis. Juman++ uses Recurrent Neural Network Language Model and MeCab uses bi-gram Markov model for analysing. SentencePiece is unsupervised text tokenizer and detokenizer, so model publishers which use SentencePiece as tokenizer release pre-trained SentencePiece model with their BERT model. In the evaluation with sentence clustering, we used Livedoor news corpus 9 . This dataset contains nine categories of articles and we used one hundred articles per category. We extracted titles from selected articles and regarded categories as classes. Then, we calculated scores with the method in Section 3.1 and compared these scores.", |
| "cite_spans": [ |
| { |
| "start": 365, |
| "end": 386, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 223, |
| "end": 230, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setups", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In the evaluation with fill mask task, we made a fill mask dataset from Japanese domain of Webis-8 https://laboro.ai/column/laboro-bert/ 9 http://www.rondhuit.com/download.html# ldcc CLS-10 (Prettenhofer and Stein, 2010) and used it. The following two steps show how to make a fill mask dataset from Webis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setups", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "1. Pick twenty nouns that have the highest frequencies of occurence from the test data of each domain: books, DVDs, and music.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setups", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "2. Pick five sentences that contain matching selected words from test data of the matching domain randomly to each selected word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setups", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "3. Use nouns which were selected in step 1 as labels for matching sentences which were selected in step 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setups", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 We replaced \" \" (Fullwidth form of CD) with CD (Halfwidth form of CD).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setups", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 We lowercased sentences when we used SP Ver. model. We did not lowercase sentences when we used SP Ver. model for the first time, and the model tokenized CD as token \"C\" and token \"D\". We checked vocabulary file of the model and found the word \"cd\", not \"CD\". So we recognized we needed to activate lowercasing option.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setups", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this section, we show the resluts of the evaluations. First, we show the result of the evaluation with sentence clustering, and then the result of evaluation with fill mask task. Table 2 summarizes A m , B m , and scores of evaluation with sentence clustering. Bigger B m is better, and smaller A m and score are better. The following shows the results of comparing models by score, A m , and B m , and figure 2 is a bar graph of the results. Score: Laboro Ver. < MeCab Ver. < SP Ver. < NICT Ver. < Kyoto Univ. Ver. < Tohoku Univ.Ver. A m : Tohoku Univ. Ver. < SP Ver. < MeCab Ver. < NICT Ver. < Laboro Ver. < Kyoto Univ. Ver. B m : Kyoto Univ. Ver. > Laboro Ver. > MeCab Ver. > NICT Ver. > SP Ver. > Tohoku Univ.Ver. Table 3 shows scores of models except NICT Ver., and Laboro Ver. in previous work (Shibayama et al., 2020) and this work. According to this table, Scores of models except MeCab Ver. changes about 0.8-30 from results in previous evaluation (Shibayama et al., 2020) . These changes did not influence the results of comparisons. Score of MeCab Ver. model became 100 or more higher than the previous result, but this change also did not influence results. Task Table 4 shows average of percentages that mask token is the word in matching label of all domains and three domains: books, DVDs, and music. Figure 3 shows a bar graph of column \"All\" in table 4. The following shows the result of comparing models by percentages of all domains, and this is different from the results in Section 4.1. ", |
| "cite_spans": [ |
| { |
| "start": 803, |
| "end": 827, |
| "text": "(Shibayama et al., 2020)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 960, |
| "end": 984, |
| "text": "(Shibayama et al., 2020)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 182, |
| "end": 189, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 721, |
| "end": 728, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 1173, |
| "end": 1187, |
| "text": "Task Table 4", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 1321, |
| "end": 1329, |
| "text": "Figure 3", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this section, we describe the results in Section 4, and why there is a difference between the results in Section 4.1 and Section 4.2. We changed the tokenizer settings for MeCab Ver. model from previous evaluation not to use subword tokenize 10 . We think this influenced the score of MeCab Ver. model, which caused a difference from a previous work (Shibayama et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 353, |
| "end": 377, |
| "text": "(Shibayama et al., 2020)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "As mentioned earlier, we considered A m and B m as in-class and out-of-class dispersion, respectively, in order to calculate easily (see, footnotes of Section 3.1). Therefore, comparing A m means evaluating whether embeddings in the same class are close, and B m means evaluating differences of embeddings that are not in the same class. We can dedude the general tendencies of each model from the results in Section 4.1. The best model is Laboro Ver., which has the second-highest B m and about 100000 smaller A m than Kyoto Univ. Ver. model. MeCab Ver. model that has the best score in previous eval-uation (Shibayama et al., 2020) is the second-best model. SP Ver. model is the third, which A m of model is it of Tohoku Univ.Ver. model or more and it of MeCab Ver. model less. Tohoku Univ. Ver. model has the worst score, which has smallest A m and B m . This means the dispersion of all embeddings is smaller than the other models.", |
| "cite_spans": [ |
| { |
| "start": 609, |
| "end": 633, |
| "text": "(Shibayama et al., 2020)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "However, the results in Section 4.1 are different from the results in Section 4.2. Thus, we could not conclude that the results of methods of evaluation with sentence clustering and fill mask task have the same tendency. We used the title of articles in evaluation with sentence clustering, but we used a sentence in product reviews (see synopsis of Webis-CLS-10 (Prettenhofer and Stein, 2010)) with fill mask task. This difference may have caused the differences between the results in Section 4.1 and Section 4.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We evaluated Japanese pre-trained BERT models using sentences that were labeled, and outputs of BERT that inputted those sentences. Then, we obtained the following result. Laboro Ver. < MeCab Ver. < SP Ver. < NICT Ver. < Kyoto Univ. Ver. < Tohoku Univ. Ver.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Also, we masked a specific noun in each sentence, calculated percentage that mask token is the word in matching label, and re-evaluated with averages of that percentage. However, we obtained the following result, and this is different from result of sentence clustering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Tohoku Univ. Ver. > NICT Ver. > MeCab Ver. > Kyoto Univ. Ver > Laboro Ver. > SP Ver.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "If we decrease the difference of type or domain of documents that are used in both experiments, there is a chance that the comparison results will be different from what we obtained in this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this section, we show basic hyperparameters of pre-trained BERT models we evaluated. However, some parameters were not written in both config file of model and model publisher's web site. Table 5 shows basic pre-training information of pre-trained models. \"No Info\" cell is a parameter that we could not found the correct value. Some publishers pretrained the model with two step pre-training, and we show those as Ph1 parameter and Ph2 parameter if there is differences. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 191, |
| "end": 198, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Basic Hyperparameters of Models We Evaluated", |
| "sec_num": null |
| }, |
| { |
| "text": "We consider the second power of deviation as the dispersion in this work in order to calculate easily. So, true in-class dispersion can be calculated from \u03c3 2 i /N . 2 Also, we consider the second power of deviation of centroids of all classes as dispersion like Am.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "According to an article of MeCab Ver. model, we have to change scripts that use only MeCab as a tokenizer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by JSPS KAKENHI Grant Number JP19K12093.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgement", |
| "sec_num": null |
| }, |
| { |
| "text": "We replaced \"selected nouns\" which appeared for the first time in matching sentence with mask token. The following shows selected Japanese nouns for each domain.We calculated percentages that mask token is the word in matching label with prepared dataset and each model. Then, we averaged percentages and compared these. The following shows notices of this comparison.\u2022 We used transformers (Wolf et al., 2019) to solve the fill mask task.", |
| "cite_spans": [ |
| { |
| "start": 391, |
| "end": 410, |
| "text": "(Wolf et al., 2019)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL-2019", |
| "volume": "", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In NAACL-2019, pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "NIPS-2013", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositionality. In NIPS-2013, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Cross-Language Text Classification using Structural Correspondence Learning", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "48th Annual Meeting of the Association of Computational Linguistics (ACL 10)", |
| "volume": "", |
| "issue": "", |
| "pages": "1118--1127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Prettenhofer and Benno Stein. 2010. Cross- Language Text Classification using Structural Corre- spondence Learning. In 48th Annual Meeting of the Association of Computational Linguistics (ACL 10), pages 1118-1127. Association for Computational Lin- guistics, July.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Building similarity dataset of japanese verbs and adjectives", |
| "authors": [ |
| { |
| "first": "Yuuya", |
| "middle": [], |
| "last": "Sakaizawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Mamoru", |
| "middle": [], |
| "last": "Komachi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "The Twenty-second Annual Meeting of the Association for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "258--261", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuuya Sakaizawa and Mamoru Komachi. 2016. Build- ing similarity dataset of japanese verbs and adjectives (in Japanese). The Twenty-second Annual Meeting of the Association for Natural Language Processing, pages 258-261.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A comparison of japanese pretrained bert models", |
| "authors": [ |
| { |
| "first": "Naoki", |
| "middle": [], |
| "last": "Shibayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Shinnou", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "IEICE Techn. Rep", |
| "volume": "119", |
| "issue": "212", |
| "pages": "89--92", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naoki Shibayama, Rui Cao, Jing Bai, Wen Ma, and Hi- royuki Shinnou. 2019. A comparison of japanese pre- trained bert models (in Japanese). IEICE Techn. Rep., 119(212):89-92.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Evaluation of pretrained BERT model by using sentence clustering", |
| "authors": [ |
| { |
| "first": "Naoki", |
| "middle": [], |
| "last": "Shibayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Shinnou", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "The Twenty-sixth Annual Meeting of the Association for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1233--1236", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naoki Shibayama, Rui Cao, Jing Bai, Wen Ma, and Hi- royuki Shinnou. 2020. Evaluation of pretrained BERT model by using sentence clustering (in Japanese). In The Twenty-sixth Annual Meeting of the Association for Natural Language Processing, pages 1233-1236.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel R", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.07461" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Huggingface's transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "R'emi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Brew", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chau- mond, Clement Delangue, Anthony Moi, Pierric Cis- tac, Tim Rault, Rémi Louf, Morgan Funtowicz, and Jamie Brew. 2019. Huggingface's transformers: State-of-the-art natural language processing. ArXiv, abs/1910.03771.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "1. Get CLS token's embedding from the output of each sentence of model m, and use the embedding as the sentence vector.2. Check which class contains the sentence vector, and calculate g (m) i: centroid of each class of model m.", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "summarizes the flow of the evaluation.", |
| "num": null |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "The flow of evaluation with sentence clustering", |
| "num": null |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "The results of comparing models by score", |
| "num": null |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Averages of percentages of all domains", |
| "num": null |
| }, |
| "TABREF1": { |
| "text": "Information of pre-trained BERT models", |
| "num": null, |
| "content": "<table><tr><td>Models</td><td>Tokenizer</td><td>Pre-training</td><td/></tr><tr><td/><td>(characteristic)</td><td>corpus</td><td/></tr><tr><td>Kyoto Univ.</td><td>Juman++</td><td>Wikipedia</td><td/></tr><tr><td>Ver.</td><td/><td/><td/></tr><tr><td>MeCab Ver.</td><td>MeCab + NE-</td><td>Articles</td><td>of</td></tr><tr><td/><td>ologd (No sub-</td><td colspan=\"2\">business news</td></tr><tr><td/><td>word tokenize)</td><td/><td/></tr><tr><td>SP Ver.</td><td>SentencePiece</td><td>Wikipedia</td><td/></tr><tr><td/><td>(do lower case</td><td/><td/></tr><tr><td/><td>= True)</td><td/><td/></tr><tr><td>Tohoku</td><td>MeCab + NE-</td><td>Wikipedia</td><td/></tr><tr><td>Univ. Ver.</td><td>ologd</td><td/><td/></tr><tr><td>NICT Ver.</td><td>MeCab + Ju-</td><td>Wikipedia</td><td/></tr><tr><td/><td>mandic</td><td/><td/></tr><tr><td>Laboro Ver.</td><td>SentencePiece</td><td colspan=\"2\">Texts on the In-</td></tr><tr><td/><td/><td colspan=\"2\">ternet (12GB)</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF2": { |
| "text": "Values and scores of evaluation with sentence clustering", |
| "num": null, |
| "content": "<table><tr><td>Models</td><td>A m</td><td>B m</td><td>Score</td></tr><tr><td>Kyoto Univ.</td><td colspan=\"3\">240131.79 337.83 710.81</td></tr><tr><td>Ver.</td><td/><td/><td/></tr><tr><td>MeCab Ver.</td><td colspan=\"3\">97536.21 154.37 631.06</td></tr><tr><td>SP Ver.</td><td colspan=\"3\">67744.36 104.05 651.06</td></tr><tr><td>Tohoku</td><td>49991.31</td><td colspan=\"2\">65.64 761.58</td></tr><tr><td>Univ. Ver.</td><td/><td/><td/></tr><tr><td>NICT Ver.</td><td colspan=\"3\">106698.11 151.27 705.37</td></tr><tr><td colspan=\"4\">Laboro Ver. 153378.22 273.83 560.13</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF3": { |
| "text": "", |
| "num": null, |
| "content": "<table><tr><td colspan=\"3\">: Scores of previous work(Shibayama et al., 2020)</td></tr><tr><td>and this work</td><td/><td/></tr><tr><td>Models</td><td>previous</td><td>this</td></tr><tr><td>Kyoto</td><td colspan=\"2\">710.88 710.81</td></tr><tr><td>Univ. Ver.</td><td/><td/></tr><tr><td>MeCab</td><td colspan=\"2\">458.19 631.06</td></tr><tr><td>Ver.</td><td/><td/></tr><tr><td>SP Ver.</td><td colspan=\"2\">668.92 651.06</td></tr><tr><td>Tohoku</td><td colspan=\"2\">792.34 761.58</td></tr><tr><td>Univ. Ver.</td><td/><td/></tr><tr><td colspan=\"3\">4.2 Result of Re-evaluation with Fill Mask</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF4": { |
| "text": "Average of percentages that mask token is true masked word", |
| "num": null, |
| "content": "<table><tr><td>Models</td><td>books</td><td>DVDs</td><td>music</td><td>All</td></tr><tr><td>Kyoto</td><td colspan=\"4\">11.53% 11.18% 9.24% 10.65%</td></tr><tr><td>Univ.</td><td/><td/><td/><td/></tr><tr><td>Ver.</td><td/><td/><td/><td/></tr><tr><td>MeCab</td><td colspan=\"4\">11.24% 13.62% 7.62% 10.83%</td></tr><tr><td>Ver.</td><td/><td/><td/><td/></tr><tr><td>SP Ver.</td><td>7.36%</td><td>9.86%</td><td>6.41%</td><td>7.88%</td></tr><tr><td>Tohoku</td><td colspan=\"4\">14.04% 12.76% 10.81% 12.54%</td></tr><tr><td>Univ.</td><td/><td/><td/><td/></tr><tr><td>Ver.</td><td/><td/><td/><td/></tr><tr><td>NICT</td><td colspan=\"4\">11.90% 12.63% 8.68% 11.07%</td></tr><tr><td>Ver.</td><td/><td/><td/><td/></tr><tr><td>Laboro</td><td colspan=\"3\">8.86% 10.44% 9.85%</td><td>9.72%</td></tr><tr><td>Ver.</td><td/><td/><td/><td/></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF5": { |
| "text": "Basic pre-training information of BERT models", |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">Models Model</td><td>Whole</td><td>Vocabulary</td><td>max</td></tr><tr><td/><td>Size</td><td>Word</td><td>Size</td><td>seq</td></tr><tr><td/><td/><td>Mask-</td><td/><td>length</td></tr><tr><td/><td/><td>ing</td><td/></tr><tr><td>Kyoto</td><td colspan=\"2\">Base No</td><td>32,000</td><td>128</td></tr><tr><td>Univ.</td><td/><td/><td/></tr><tr><td>Ver.</td><td/><td/><td/></tr><tr><td>MeCab</td><td colspan=\"2\">Base No</td><td>32,000</td><td>No Info</td></tr><tr><td>Ver.</td><td/><td/><td/></tr><tr><td colspan=\"3\">SP Ver. Base No</td><td>32,000</td><td>No Info</td></tr><tr><td>Tohoku</td><td colspan=\"2\">Base No</td><td>32,000</td><td>512</td></tr><tr><td>Univ.</td><td/><td/><td/></tr><tr><td>Ver.</td><td/><td/><td/></tr><tr><td>NICT</td><td colspan=\"2\">Base No</td><td>32,000</td><td>Ph1-</td></tr><tr><td>Ver.</td><td/><td/><td/><td>128</td></tr><tr><td/><td/><td/><td/><td>Ph2-</td></tr><tr><td/><td/><td/><td/><td>512</td></tr><tr><td>Laboro</td><td colspan=\"2\">Base No</td><td>32,000</td><td>Ph1-</td></tr><tr><td>Ver.</td><td/><td/><td/><td>128</td></tr><tr><td/><td/><td/><td/><td>Ph2-</td></tr><tr><td/><td/><td/><td/><td>512</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |