| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:54:09.707227Z" |
| }, |
| "title": "Reproducible and Efficient Benchmarks for Hyperparameter Optimization of Neural Machine Translation Systems", |
| "authors": [ |
| { |
| "first": "Xuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": {} |
| }, |
| "email": "xuanzhang@jhu.edu" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": {} |
| }, |
| "email": "kevinduh@cs.jhu.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Hyperparameter selection is a crucial part of building neural machine translation (NMT) systems across both academia and industry. Fine-grained adjustments to a model's architecture or training recipe can mean the difference between a positive and negative research result or between a state-of-the-art and underperforming system. While recent literature has proposed methods for automatic hyperparameter optimization (HPO), there has been limited work on applying these methods to neural machine translation (NMT), due in part to the high costs associated with experiments that train large numbers of model variants. To facilitate research in this space, we introduce a lookup-based approach that uses a library of pre-trained models for fast, low cost HPO experimentation. Our contributions include (1) the release of a large collection of trained NMT models covering a wide range of hyperparameters, (2) the proposal of targeted metrics for evaluating HPO methods on NMT, and (3) a reproducible benchmark of several HPO methods against our model library, including novel graph-based and multiobjective methods.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Hyperparameter selection is a crucial part of building neural machine translation (NMT) systems across both academia and industry. Fine-grained adjustments to a model's architecture or training recipe can mean the difference between a positive and negative research result or between a state-of-the-art and underperforming system. While recent literature has proposed methods for automatic hyperparameter optimization (HPO), there has been limited work on applying these methods to neural machine translation (NMT), due in part to the high costs associated with experiments that train large numbers of model variants. To facilitate research in this space, we introduce a lookup-based approach that uses a library of pre-trained models for fast, low cost HPO experimentation. Our contributions include (1) the release of a large collection of trained NMT models covering a wide range of hyperparameters, (2) the proposal of targeted metrics for evaluating HPO methods on NMT, and (3) a reproducible benchmark of several HPO methods against our model library, including novel graph-based and multiobjective methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Choosing effective hyperparameters is crucial for building strong neural machine translation (NMT) systems. Although some choices present obvious trade-offs (e.g., more and larger layers tend to increase quality at the cost of speed), others are more subtle (e.g., effects of batch size, learning rate, and normalization techniques on different layer types). Optimal versus suboptimal hyperparameters can lead to dramatic swings in system performance; consider the wide range of BLEU scores for variants of the same base system in Figure 1 (left). In practice, these hyperparame-ters are often tuned manually based on intuition and heuristics, a tedious and error-prone process that can lead to unreliable experimental results and underperforming shared task or production systems. The difficulty is compounded when system builders must jointly optimize multiple objectives, such as translation accuracy (BLEU) and decoding speed, which are largely uncorrelated as shown in Figure 1 (right).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 531, |
| "end": 539, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 974, |
| "end": 982, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the past decade, various hyperparameter optimization (HPO) methods have emerged in the machine learning literature under the labels of ''AutoML'' (Bergstra et al., 2011; Hutter et al., 2011; Bardenet et al., 2013; Snoek et al., 2015) and ''neural architecture search'' (Zoph and Le, 2016; Liu et al., 2018a,b; Cai et al., 2018; . However, it is unclear how they perform on NMT; we are not aware of any prior work with comprehensive evaluation. One challenge is that the state-of-the-art NMT models (Sutskever et al., 2014; Bahdanau et al., 2015; Gehring et al., 2017; Vaswani et al., 2017 ) require significant computational resources for training. Secondly, they usually have large hyperparameter search spaces. Thus, it is prohibitively expensive in practice to compare HPO methods on NMT tasks.", |
| "cite_spans": [ |
| { |
| "start": 149, |
| "end": 172, |
| "text": "(Bergstra et al., 2011;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 173, |
| "end": 193, |
| "text": "Hutter et al., 2011;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 194, |
| "end": 216, |
| "text": "Bardenet et al., 2013;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 217, |
| "end": 236, |
| "text": "Snoek et al., 2015)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 272, |
| "end": 291, |
| "text": "(Zoph and Le, 2016;", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 292, |
| "end": 312, |
| "text": "Liu et al., 2018a,b;", |
| "ref_id": null |
| }, |
| { |
| "start": 313, |
| "end": 330, |
| "text": "Cai et al., 2018;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 501, |
| "end": 525, |
| "text": "(Sutskever et al., 2014;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 526, |
| "end": 548, |
| "text": "Bahdanau et al., 2015;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 549, |
| "end": 570, |
| "text": "Gehring et al., 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 571, |
| "end": 591, |
| "text": "Vaswani et al., 2017", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In order to enable reproducible HPO research on NMT tasks, we adopt a benchmark procedure based on ''table-lookup''. This approach was recently introduced to neural architecture search by Ying et al. (2019) , and to hyperparameter optimization by . First, we train an extremely large number of NMT models with diverse hyperparameter settings and record their performance metrics (e.g., BLEU, decoding time) in a table. Then, we constrain our HPO methods to sample from this finite set of models. This allows us to simply ''look-up'' their precomputed performance metrics, and amortizes the burden of computation: As long as we ensure that Figure 1 : Left: Histogram of BLEU scores that show wide variance in performance for a base NMT system (transformer) with different hyperparameters (e.g., BPE operations, # of layers, initial learning rate). Right: Scatterplot of BLEU and decoding time with different hyperparameters. Gold stars represent the Pareto-optimal systems.", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 206, |
| "text": "Ying et al. (2019)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 639, |
| "end": 647, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "we have trained and pre-computed a large number of representative NMT models beforehand, HPO algorithm developers no longer need to deal with the cost of training NMT. Importantly, this kind of benchmark significantly speeds up the HPO experiment turnover time, enabling fast repeated trials for rigorous tests and facilitates detailed error analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The main contributions of this work are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1. Dataset: We release a benchmark dataset 1 for comparing HPO methods on NMT models. This ''table-lookup'' HPO dataset supports both single-objective and multiobjective optimization of translation accuracy and decoding time (Section 3). Specifically, we trained a total of 2,245 Transformers (Vaswani et al., 2017) on six different corpora (with a cost of approximately 1,547 GPU days), and collected all pairs of hyperparameter settings and corresponding performance metrics.", |
| "cite_spans": [ |
| { |
| "start": 293, |
| "end": 315, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We provide three kinds of metrics for evaluating HPO methods, based on different computational budgets (Section 4). We also demonstrate error analysis techniques that are enabled by this ''table-lookup'' framework, which provide insights into algorithm behavior (Section 7).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation protocols:", |
| "sec_num": "2." |
| }, |
| { |
| "text": "We benchmark the performance of several HPO methods on our dataset (Section 6). These include Bayesian optimization as well as a novel graph-based method that exploits the structure of the hyperparameter space (Section 5). We also extend these methods to handle the 1 https://github.com/Este1le/hpo_nmt. multiobjective optimization of both BLEU and decoding time. These experiments illustrate how to utilize the dataset to rigorously evaluate HPO for NMT.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HPO method benchmarks:", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Given a machine learning algorithm with H hyperparameters, we denote the domain of the h-th hyperparameter by \u039b h and the overall hyperparameter configuration space as \u039b = \u039b 1 \u00d7 \u039b 2 \u00d7 . . . \u039b H . When trained with a hyperparameter setting \u03bb \u2208 \u039b on data D train , the algorithm's performance metric on some validation data D valid is f (\u03bb) :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HPO Problem Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "= V (\u03bb, D train , D valid ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HPO Problem Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the context of NMT, f (\u2022) or V(\u2022) could be the perplexity, translation accuracy (e.g., BLEU score), or decoding time on D valid . In general, f (\u2022) is computationally expensive to obtain; it requires training a model to completion, then evaluating some performance metric on a validation set. For purposes of exposition, we assume that lower f (\u2022) is better, so we might define f (\u2022) as 1 \u2212 BLEU. The goal of hyperparameter optimization is then to find a \u03bb \u22c6 = arg min \u03bb\u2208\u039b f (\u03bb), with as few evaluations of f (\u2022) as possible. An HPO problem can be challenging for three reasons: (a) \u039b may be a combinatorially large space, prohibiting grid search over hyperparameters. (b) f (\u2022) may be expensive to compute, so there is a tight budget on how many evaluations of f (\u2022) are allowed. (c) f is not a continuous function and no gradient information can be exploited, forcing us to view the arg min as a blackbox discrete search problem. NMT HPO search exhibits all these conditions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HPO Problem Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "One class of algorithms that tackles the HPO problem is sequential model-based optimization (SMBO), illustrated in Figure 2 . SMBO approximates f with a cheap-to-evaluate surrogate modelf (Feurer and Hutter, 2019; Luo, 2016; . SMBO starts by querying f with initial hyperparameters {\u03bb init } and recording the resulting (\u03bb init , f (\u03bb init )) pairs. Then, it iteratively (1) fits the surrogatef on pairs observed so far; (2) gets the predictionsf (\u03bb i ) for unlabeled/unobserved hyperparameters; and (3) selects a promising \u03bb p to query next based on these predictions and an acquisition function, whose role is to trade off exploration in \u039b with high model uncertainty and exploitation in \u039b with lowf (\u2022). Figure 2 : SMBO framework. The part shaded in light blue contains two ingredients required for implementing a SMBO method: the surrogate model and the acquisition function, for which we will present two choices in Section 5.", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 213, |
| "text": "(Feurer and Hutter, 2019;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 214, |
| "end": 224, |
| "text": "Luo, 2016;", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 115, |
| "end": 123, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 707, |
| "end": 715, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "HPO Problem Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Evolutionary algorithms (Eberhart and Shi, 1998; Simon, 2013) are also used to solve HPO problems. Unlike SMBO, they do not approximate f with a surrogatef ; rather, they directly sample hyperparameters with high f (\u2022) from a population and recombine them to form the next query. 2", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 48, |
| "text": "(Eberhart and Shi, 1998;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 49, |
| "end": 61, |
| "text": "Simon, 2013)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HPO Problem Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "3 Table-Lookup HPO Datasets", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HPO Problem Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To evaluate a newly devised HPO algorithm, one needs to run each component of the loop in Figure 2 . However, the ''query'' step is computationally expensive: We need to train a new NMT system each time we sample a new hyperparameter.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 90, |
| "end": 98, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Table-Lookup Framework", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The idea of table lookup is to simply pre-train a large set of I NMT systems and record the pairs", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Table-Lookup Framework", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "{\u03bb i , f (\u03bb i )} i=1,.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Table-Lookup Framework", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "..,I in a table. Thus, when running the loop in Figure 2 , the HPO algorithm developer can look up f (\u03bb i ) whenever necessary, without having to train a NMT model from scratch. This significantly speeds up the experimental process. The advantages are: 1. One can perform multiple random trials of the same algorithm, to test robustness.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 48, |
| "end": 56, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Table-Lookup Framework", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "2. One can perform comparisons with more baseline algorithms, to make stronger claims.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Table-Lookup Framework", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "3. One can perform the same experiment under different budget constraints, to simulate different real-world use cases. Promising candidates may be further developed and evaluated. The most robust one will be selected to apply to the target MT data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Table-Lookup Framework", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "4. One can track the progress of an experiment with respect to oracle results, allowing for more detailed error analysis of HPO.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Table-Lookup Framework", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To be effective, table lookup depends on two important assumptions: First, the table has to be sufficiently large to cover the space of hyperparameters \u039b. Second, the HPO algorithm needs to be modified to sample from the finite set of hyperparameters in the table; this is usually easy to implement but the assumption is that finite-sample results will generalize.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Table-Lookup Framework", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "There exist many choices of HPO algorithm, which can be evaluated or further developed on our lookup tables. Figure 3 illustrates this process. The performance of HPO algorithm candidates on various MT datasets serves as the basis for HPO selection. The selected HPO algorithm can then be applied on new MT datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 109, |
| "end": 117, |
| "text": "Figure 3", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "HPO Algorithm Selection/Development", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "There are two kinds of generalization effects at play: (1) generalization of an HPO algorithm across MT datasets, and (2) generalization of MT models and their associated hyperparameters across MT datasets. We mainly care about (1) in the algorithm development process, which is why we opt to provide six distinct datasets described in Section 3.3 (as opposed to, e.g., 1 dataset trained on large MT data). If an HPO algorithm performs efficiently in finding good hyperparameter configurations on many MT datasets, then we can more reasonably believe that it will run quickly on a new dataset, regardless of the underlying MT data characteristics. Even if the best configuration on one MT dataset does not transfer to another, a robust HPO algorithm should still be capable of finding good hyperparameters because the algorithm learns from scratch on each dataset independently.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HPO Algorithm Selection/Development", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To create a robust HPO benchmark, we trained NMT models on six different parallel corpora, which exhibit a variety of characteristics: TED Talks: We trained Chinese-English (zh-en) and Russian-English (ru-en) models on the datasplit of Duh (2018) . This is a mid-resource setup, where D train consists of 170k lines for zh-en and 180k lines for ru-zh. D valid has 1,958 sentences and is multiway parallel for both language-pairs. WMT2019 Robustness task (Li et al., 2019) :", |
| "cite_spans": [ |
| { |
| "start": 236, |
| "end": 246, |
| "text": "Duh (2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 454, |
| "end": 471, |
| "text": "(Li et al., 2019)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MT Data and Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We trained models on Japanese-English data, in both directions (ja-en, en-ja). D train has 4 M lines from a mix of domains. D valid is a concatenation of 4k mixed-domain sentences and 1k Reddit sentences, for a total of 5,405 lines. The goal of the Robustness task is to test how NMT systems perform on non-standard and noisy text (e.g., Reddit). Low Resource tasks: We trained models using the IARPA MATERIAL datasets for Swahili-English (sw-en) and Somali-English (so-en). D train consists of only 24k lines for both language pairs (BUILD set), and D valid consists of 2675 lines (ANALYSIS2 set).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MT Data and Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Although there are many potential MT datasets we could choose from, we believe these six datasets form a good representative set. It ranges from high-to-low resource; it contains both noisy and clean settings. These datasets also have different levels of similarity-for example, zh-en and ru-en TED talks use the same multiway parallel D valid , so one could ask whether the optimal hyperparameters transfer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MT Data and Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The text is tokenized by Jieba for Chinese, by Kytea for Japanese, and by the Moses tokenizer for the rest. Byte pair encoding (BPE) segmentation (Sennrich et al., 2016) is learned and applied separately for each side of bitext. We train Transformer NMT models with Sockeye 3 (Hieber et al., 2017) , focusing on these hyperparameters:", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 169, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 276, |
| "end": 297, |
| "text": "(Hieber et al., 2017)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MT Data and Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 preprocessing configurations: number of BPE symbols 4 (bpe)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MT Data and Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 training settings: initial learning rate (init lr) for the Adam optimizer", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MT Data and Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 architecture designs: 5 number of layers (#layers), embedding size (#embed), number of hidden units in each layer (#hidden), number of heads in self-attention (#att heads).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MT Data and Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "These hyperparameters are chosen because they significantly affect both accuracy and speed of the resulting NMT. Other hyperparameters are kept at their Sockeye defaults. 6 Table 1 shows our overall hyperparameter space \u039b; in total among all six datasets, we have 1,983 models; Table 2 shows the exact number of models per dataset, along with the best models and their hyperparameter settings. 7", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 172, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 173, |
| "end": 180, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 278, |
| "end": 285, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "MT Data and Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Rationale for Hyperparameter Values: There are various design trade-offs in deciding the range and granularity of hyperparameter values. First, we might expand on a wider range of values (e.g., change #hidden = {1024, 2048} to {512, 1024, 2048, 4096}). The effect of this is that we test the HPO algorithm on a wider range of inputs, with potentially more variability in metrics like BLEU and inference speed. Second, we might expand on a more finegrained range of values (e.g., change #hidden = {1024, 2048} to {1024, 1536, 2048}). This might result in smoother metrics, making it easier for HPO algorithms to learn. Although wider range and finer granularity are desirable properties for a HPO dataset, each additional value causes an exponential increase in the number of models because of the cross-product of all values. In general, we think Table 2 : For each language pair, we report the number of NMT systems trained on it, the oracle best BLEU we obtained, and its corresponding hyperparameter configuration.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 847, |
| "end": 854, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "MT Data and Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We train all models on D train until they converge in terms of perplexity on D valid . We then record various performance measurements:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objectives: Accuracy and Cost", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u2022 Translation accuracy: BLEU (Papineni et al., 2002) and perplexity on D valid .", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 52, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objectives: Accuracy and Cost", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u2022 Computational cost: GPU wall clock time for decoding D valid , number of updates for the model to converge, GPU memory used for training, total number of model parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objectives: Accuracy and Cost", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In this paper, we use BLEU on D valid for single-objective experiments; we use BLEU and decoding time for multiobjective experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objectives: Accuracy and Cost", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We might be interested in seeing whether good configurations are always good across datasets. This can be done by ranking configurations by BLEU for each dataset, then measuring correlation between rankings. We show the Spearman's correlation coefficient in Figure 4 . NMT systems with same language pairs (ja-en vs. en-ja) are highly correlated. On the contrary, other pairs show low correlation (0.084 for ja-en vs. so-en), implying the need to run HPO on new datasets separately.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 258, |
| "end": 266, |
| "text": "Figure 4", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hyperparameter Importance/Correlation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "The table-lookup approach also enables indepth analyses of how hyperparameters generally affect system performance. Following , we assess the importance of hyperparameters with fANOVA, which computes the variation in BLEU when changing a specific hyperparameter with values of all the other hyperparameters fixed. In Figure 5 , on en-ja, when considering only the top performing NMT models (top left), #att heads, init lr, and #embed impact BLEU the most, over the entire configuration space (top middle), #embed is the distinguishing factor. The analysis can be extended to pairs of hyperparameters, where we observe the interaction of init lr and #embed being important ( Figure 5 bottom left). Questions may arise over whether the results on en-ja can be taken as general conclusions. We find that it is dataset-dependent-hyperparameter importance ranking differs across language pairs, and is dependent on the range and granularity of hyperparameters considered. As shown in the right column of Figure 5 , bpe is the most important hyperparameter for sw-en, instead of #embed. This shows the diversity of our selected MT datasets and the hyperparameter importance analysis is a good tool for probing the search space characteristics of these datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 317, |
| "end": 325, |
| "text": "Figure 5", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 674, |
| "end": 682, |
| "text": "Figure 5", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 999, |
| "end": 1007, |
| "text": "Figure 5", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hyperparameter Importance/Correlation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Our table-lookup dataset enables reproducible and efficient benchmarks for HPO of NMT systems. Li and Talwalkar (2019) introduce two notions of reproducibility: exact reproducibility (the reproducibility of reported experimental results); and broad reproducibility (the generalization of the experimental results). 9 Our benchmarks are exact reproducible in the sense that we provide the tables that record all model results (Section 3.3) and the code to run and evaluate our HPO algorithms (Section 6). However, they are not guaranteed to be broad reproducible, because the generalizability of the results might be restricted due to fixed collections of hyperparameter configurations, the variance associated with multiple runs, and the unknown best representative set of MT data. As a result, in this work, we should be careful to not make general conclusions from the observations, but to show how the dataset can be potentially used in facilitating HPO research. ", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 118, |
| "text": "Li and Talwalkar (2019)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reproducible and Efficient Benchmarks", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "To assess HPO method performance, we measure the runtime to reach a quality indicator (e.g., BLEU) target value. The runtime is defined as the number of NMT models trained, or equivalently the number of function evaluations f (\u03bb) in Figure 2 . We consider two ways to measure the HPO performance: fixed-target and fixedbudget.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 233, |
| "end": 241, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation Protocols", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For single-objective optimization, we have:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Objective Evaluation Metrics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 fixed-target best (ftb): We fix the quality indicator value to the best value in the dataset and measure runtime to reach this target.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Objective Evaluation Metrics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 fixed-target close (ftc): We measure the runtime to reach a target that is slightly less than the oracle best. This is useful when one can tolerate some performance loss.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Objective Evaluation Metrics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 fixed-budget (fb):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Objective Evaluation Metrics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We fix the budget of function evaluations and measure the difference between the oracle best quality indicator value (e.g., oracle best BLEU) in the dataset vs. the best value achieved by systems queried by the HPO method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Objective Evaluation Metrics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The fixed-budget metric asks what is the best possible system assuming a hard constraint on training resources. The fixed-target metrics ask how much training information is needed to find the best (or approximate best) system in the dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Objective Evaluation Metrics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In practice, one might desire to optimize multiple objectives, such as translation accuracy and speed. Suppose we have J objectives, and they can be jointly represented as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiobjective Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "F (\u03bb) = [f 1 (\u03bb), f 2 (\u03bb), \u2022 \u2022 \u2022 , f J (\u03bb)].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiobjective Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As it is unlikely that any one \u03bb will optimize all objectives simultaneously, we adopt the concept of Pareto optimality (Godfrey et al., 2007) . In the context of minimization, \u03bb is said to dominate \u03bb \u2032 , that is, \u03bb \u227a \u03bb \u2032 , if f j (\u03bb) \u2264 f j (\u03bb \u2032 ) \u2200j and f j (\u03bb) < f j (\u03bb \u2032 ) for at least one j. If nothing dominates \u03bb, we call it the Pareto optimal solution. The set of all Pareto solutions is referred to as the Pareto front, that is, {\u03bb | \u2204\u03bb \u2032 \u2208 \u039b : \u03bb \u2032 \u227a \u03bb}. Intuitively, these are solutions satisfying all possible trade-offs in the multiobjective space. Figure 1 shows an example of Pareto solutions that maximize BLEU and minimize speed.", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 142, |
| "text": "(Godfrey et al., 2007)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 560, |
| "end": 568, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multiobjective Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For multiobjective optimization, the quality indicator becomes the Pareto front, thus we have:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiobjective Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 fixed-target all (fta): We measure the runtime to find all points on the Pareto front.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiobjective Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 fixed-target one (fto): We measure the runtime to get at least one Pareto point.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiobjective Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 fixed-budget (fbp): We fix the budget of function evaluations and measure the number of Pareto-optimal points obtained.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiobjective Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In the literature, a common way to compare HPO methods is to plot quality indicator value as a function of runtime on a graph (e.g., see Figure 6 ). The proposed metrics can be viewed as summary statistics drawn as line thresholds on such graphs (Hansen et al., 2016) , where the budget/target is set to a value appropriate for the use case.", |
| "cite_spans": [ |
| { |
| "start": 246, |
| "end": 267, |
| "text": "(Hansen et al., 2016)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 137, |
| "end": 145, |
| "text": "Figure 6", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multiobjective Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Some HPO methods may be sensitive to randomness in initial seeds {\u03bb init } (Feurer et al., 2015) . We suggest that repeated randomized trials are important for a rigorous evaluation, and this is only feasible with a table-lookup dataset. In our experiments, we average results of HPO runs across 100 trials, where each trial is seeded with a different set of 3 random initial hyperparameter settings.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 96, |
| "text": "(Feurer et al., 2015)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Repeated Trials", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We now describe two HPO/SMBO methods used in our experiments: Bayesian optimization 10 is a popular method. Graph-based SMBO is a novel method that adapts ideas in graph-based semi-supervised learning to the HPO problem.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Given a target function f : \u039b \u2192 R, Bayesian optimization (Brochu et al., 2010; Shahriari et al., 2015; Frazier, 2018) aims to find an input \u03bb \u22c6 \u2208 arg min \u03bb\u2208\u039b f (\u03bb). It models f with a posterior probability distribution p(f | L), where L is a set of observed points. This posterior distribution is updated each time we observe f at a new point \u03bb p . The utility of each candidate 10 There are works adopting Bayesian optimization for HPO of statistical machine translation systems (Miao et al., 2014; Beck et al., 2016) . point is quantified by an acquisition function a : \u039b \u2192 R, and \u03bb p \u2208 arg max \u03bb\u2208\u039b a(\u03bb). In practice, a prominent choice for p(f | L) is Gaussian process regression, and a common acquisition function is Expected Improvement (EI).", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 78, |
| "text": "(Brochu et al., 2010;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 79, |
| "end": 102, |
| "text": "Shahriari et al., 2015;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 103, |
| "end": 117, |
| "text": "Frazier, 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 480, |
| "end": 499, |
| "text": "(Miao et al., 2014;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 500, |
| "end": 518, |
| "text": "Beck et al., 2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bayesian Optimization (BO)", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "A Gaussian Process (GP) (Rasmussen, 2003) G(m(\u03bb), k(\u03bb, \u03bb \u2032 )) is a collection of random variables such that any finite subset of them follows a multivariate Gaussian distribution. A GP is fully specified by a mean m(\u03bb) and a covariance function or a kernel k(\u03bb, \u03bb \u2032 ), and the sufficient statistics of the posterior predictive distribution, \u00b5(\u2022) 11 and \u03a3(\u2022), can be computed with", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 41, |
| "text": "(Rasmussen, 2003)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u00b5(\u03bb) = K T \u22c6 K \u22121 y, (1) \u03a3(\u03bb) = k(\u03bb, \u03bb) \u2212 K T \u22c6 K \u22121 K \u22c6 ,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Gaussian Process Regression", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "where y = [. . . ; f (\u03bb); . . . , ], K \u22c6 = k(\u039b observed , \u03bb) and K = k(\u039b observed , \u039b observed ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "In the case of HPO, the kernel k() measures the similarity between hyperparameter configurations and \u00b5() is a prediction of the f () values of not-evaluated hyperparameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "The EI score is defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Expected Improvement (EI)", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "a EI (\u03bb) = E[max(f (\u03bb) \u2212 f min , 0)],", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Expected Improvement (EI)", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "where f min is the best observed value thus far, and f (\u03bb) = \u00b5(\u03bb). When the prediction f (\u03bb) follows a normal distribution as in the GP, EI can be computed in a closed form. Our acquisition function computes EI for each point in the grid of hyperparameters, and queries the one with the largest value.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Expected Improvement (EI)", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "Semi-supervised learning addresses the question how to utilize a handful of labeled data and a large amount of unlabeled data to improve prediction accuracy. Graph-based semi-supervised learning (GBSSL, Zhu et al., 2003; Zhu, 2005) describes the structure of data with a graph, where each vertex is a data point and each weighted edge reflects the similarity between vertices. It makes a smoothness assumption that neighbors connected by edges tend to have similar labels, and labels can propagate throughout the graph. In SMBO surrogate modeling, we hope to make predictions for the unlabeled or not-evaluated points in the hyperparameter space based on the information of labeled or evaluated points. If we pre-define the set of all potential points, then this becomes highly related to semisupervised learning. From this point of view, we propose GBSSL equipped with suitable acquisition functions as a new SMBO method for searching over a grid of representative hyperparameter configurations.", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 220, |
| "text": "(GBSSL, Zhu et al., 2003;", |
| "ref_id": null |
| }, |
| { |
| "start": 221, |
| "end": 231, |
| "text": "Zhu, 2005)", |
| "ref_id": "BIBREF49" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based SMBO (GB)", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Suppose we have a graph G = (V, E) with nodes V corresponding to n points, of which L denotes the set of labeled points", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "{(\u03bb 1 , f (1)), \u2022 \u2022 \u2022 , (\u03bb l , f (l))}, where f (i) is short for f (\u03bb i ), and U denotes the set of unlabeled points {\u03bb l+1 , \u2022 \u2022 \u2022 , \u03bb l+u }, where n = l + u.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "The edges E are represented by a n \u00d7 n weight matrix W . For instance, W can be given as the radial basis function (RBF):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "w ij = exp ( \u2212 1 / ( 2\u03c3 2 ) \u03a3 H d=1 (\u03bb id \u2212 \u03bb jd ) 2 ) . (4)",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "Note that G is not necessarily fully connected; in practice, kNN graphs with a small k turn out to perform well, where nodes i, j are connected if i is in j's k-nearest-neighborhood or vice versa. 12 Because closer points are assumed to have similar labels, we define the energy function as:", |
| "cite_spans": [ |
| { |
| "start": 197, |
| "end": 199, |
| "text": "12", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "E(f ) = 1 2 i,j w ij (f (i) \u2212 f (j)) 2 ,", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "and we constrain f (i), i \u2208 L or f L to be true labels and aim to find f (i), i \u2208 U or f U that minimizes the energy. We define a diagonal matrix D, where D ii = \u03a3 j W ij and the combinatorial Laplacian \u2206 = D \u2212 W ; Equation (5) can then be rewritten to E(f ) = f T \u2206f . If we partition the Laplacian matrix into blocks:",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2206 = \u2206 LL \u2206 LU \u2206 U L \u2206 U U ,", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "12 In experiments, based on initial tuning, we set kNN so that each point has on average n/7 neighbors.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "we can predict the f () values for unlabeled points by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "f U = \u2212\u2206 \u22121 U U \u2206 U L f L . (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Regression", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "We propose a novel acquisition function called expected influence that exploits the graph structure. The idea is to query the point that, if its f () is observed, has the highest potential to change the f () of all other points as we re-run label propagation through the graph. We first scale the labels on the graph f (i) \u2208 R to be between 0 and 1. The best labeled point is set to 1; for the other labeled points, we first compute the probability that a random walk starting at 1 reaches it, then set the label to be 1 if the probability is larger than 0.5 and 0 otherwise.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Expected Influence (EIF)", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "If we were to query an unlabeled point k, there are two scenarios: Its label is either 1 with probability f (k) or 0 with probability 1\u2212f (k). For each scenario, we then consider including k as a newly added ''labeled'' point and re-running label propagation. f +(\u03bb k ,1) (i) are the new predictions for points i in the scenario where k is added with label 1. If k is an influencer in the positive direction, this means that many points i will now have large f +(\u03bb k ,1) (i); otherwise, f +(\u03bb k ,1) (i) might be small on average in magnitude. On the other hand, suppose we add k with label 0 and run label propagation again to obtain new predictions f +(\u03bb k ,0) (i). If k is an influencer in the negative direction, this means that f +(\u03bb k ,0) (i) will be small (or conversely 1 \u2212 f +(\u03bb k ,0) (i) will be large).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Expected Influence (EIF)", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "We can now define an influence score and have the acquisition function seeking point p that maximizes the following:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Expected Influence (EIF)", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "a EIF (\u03bb k ) = (1 \u2212 f (k)) n i=1 (1 \u2212 f +(\u03bb k ,0) (i)) +f (k) n i=1 f +(\u03bb k ,1) (i)", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Expected Influence (EIF)", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "Intuitively, we try adding each unlabeled point as either a desirable point (label 1) or undesirable point (0). We measure whether this addition changes the result of GB regression, and finally query the hyperparameter that is expected to cause the most significant change.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Expected Influence (EIF)", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "There is a connection between the BO and GB due to the link between GPs and graphs. The", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BO vs. GB", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Algorithm 1: Multiobjective SMBO Input : Initial seeds {\u03bb init }, Budget B Output: Pareto-front approximation P 1 L \u2190 {\u2022 \u2022 \u2022 (\u03bb init , F (\u03bb init )) \u2022 \u2022 \u2022 } 2 while b <= B do 3 P \u2190 Compute the Pareto front of L 4 Fit surrogate modelsf 1 , \u2022 \u2022 \u2022 ,f J on L 5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BO vs. GB", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Select a new point \u03bb p based on an infill criterion and surrogate model predictions", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BO vs. GB", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "6 L \u2190 L \u222a {(\u03bb p , F (\u03bb p ))} 7 end 8 return P", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BO vs. GB", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "GB method defines a Gaussian random field on the graph, which is a multivariate Gaussian distribution on the nodes. This is equivalent to ''finite set'' GPs. Zhu (2005) showed that the kernel matrix K of the finite set GP is equivalent to the inverse of a function of the graph Laplacian \u2206, that is, K = (2\u03b2(\u2206 + I \u03c3 2 )) \u22121 . 13 The difference between the finite set GP and GP is that the kernel matrix of the former is defined on L \u222a U , while the latter is defined on \u039b. As a semisupervised method, the label propagation rule of GB (Equation 7) shows that all the nodes on the graph contribute to the prediction of a single unlabeled node, whereas for GP, the posterior predictive distribution of a new point does not depend on other unlabeled points as shown by Equation (1).",
| "cite_spans": [ |
| { |
| "start": 158, |
| "end": 168, |
| "text": "Zhu (2005)", |
| "ref_id": "BIBREF49" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BO vs. GB", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The main advantage of GB is that it offers flexibility to build graphs over the search space. For instance, one can build a graph with configurations from different model architectures, for example, RNN, CNN, and Transformers. Nodes of the same architecture might gather into a cluster, and clusters can be connected with each other. One can also manipulate the edge weights by manually defined heuristics. One example of such rules could be Euclidean distance scaled by hyperparameter importance. We leave this as future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BO vs. GB", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The theoretical caveat of the GB method is that it is restricted to a discrete search space defined by a graph. If a dense grid is desired to mimic a continuous search space, increasing time and space complexity would make it a less efficient method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BO vs. GB", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "For multiobjective optimization, we can use the same surrogate models to estimate eachf j independently; but we need a new acquisition 13 \u03b2 and \u03c3 are adjustable parameters. function that considers the Pareto front. Various methods have been proposed (Zitzler and Thiele, 1998; Ponweiser et al., 2008; Picheny, 2015; Shah and Ghahramani, 2016; Svenson and Santner, 2016) . Here, we adopt the expected hypervolume improvement (EHVI) method (Emmerich et al., 2011) , which is a generalization of EI. EHVI is an infill criterion and can be combined with different surrogate models. Algorithm 1 provides pseudo-code for the framework.",
| "cite_spans": [ |
| { |
| "start": 250, |
| "end": 276, |
| "text": "(Zitzler and Thiele, 1998;", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 277, |
| "end": 300, |
| "text": "Ponweiser et al., 2008;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 301, |
| "end": 315, |
| "text": "Picheny, 2015;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 316, |
| "end": 342, |
| "text": "Shah and Ghahramani, 2016;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 343, |
| "end": 369, |
| "text": "Svenson and Santner, 2016)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 438, |
| "end": 461, |
| "text": "(Emmerich et al., 2011)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiobjective Optimization", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "We evaluate HPO methods on six NMT tasks with the provided benchmark dataset and report their performance measured by three runtime-based assessment metrics mentioned in Section 4. The code base is provided to ensure reproducibility. 14", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "6" |
| }, |
| { |
| "text": "For single-objective optimization, our goal is to find a hyperparameter configuration giving the highest BLEU score over a predefined grid.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Objective Optimization", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "We run the comparison with two surrogate models, two kernels, 15 and two acquisition functions, leading to the following HPO systems, where all the GB systems are introduced by this work:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.1.1" |
| }, |
| { |
| "text": "\u2022 RS: random search (Bergstra and Bengio, 2012) , which uniformly samples hyperparameter configurations at random over the grid.", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 47, |
| "text": "(Bergstra and Bengio, 2012)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.1.1" |
| }, |
| { |
| "text": "\u2022 BO EI M: GP-based BO with Mat\u00e9rn52 covariance function and expected improvement as acquisition function.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.1.1" |
| }, |
| { |
| "text": "\u2022 BO EI R: GP-based BO with RBF kernel and EI as acquisition function.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.1.1" |
| }, |
| { |
| "text": "\u2022 GB EI M: GB with Mat\u00e9rn52 kernel and EI as acquisition function. 16", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.1.1" |
| }, |
| { |
| "text": "\u2022 Adjusting initialization can result in a noticeable variance on performance. We suggest that researchers experiment with enough random trials when evaluating HPO systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.1.1" |
| }, |
| { |
| "text": "We now show benchmarks for multiobjective optimization. Our goal is to search for configurations achieving higher BLEU and less decoding time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiobjective Optimization", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We run the comparison on the following systems, where GB systems are introduced by this work:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.2.1" |
| }, |
| { |
| "text": "\u2022 RS: random search, uniformly samples the configurations at random.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.2.1" |
| }, |
| { |
| "text": "\u2022 BO M: GP-based BO equipped with Mat\u00e9rn kernel and EHVI as the infill criterion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.2.1" |
| }, |
| { |
| "text": "\u2022 BO R: GP-based BO with RBF kernel and EHVI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.2.1" |
| }, |
| { |
| "text": "\u2022 GB M: GB equipped with Mat\u00e9rn kernel and EHVI as the infill criterion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.2.1" |
| }, |
| { |
| "text": "\u2022 GB R: GB with RBF kernel and EHVI. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Comparison", |
| "sec_num": "6.2.1" |
| }, |
| { |
| "text": "The multiobjective optimization evaluation results are summarized in Table 4 :", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 69, |
| "end": 76, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6.2.2" |
| }, |
| { |
| "text": "\u2022 RS is a bad choice for multiobjective optimization, if one aims to quickly collect as many Pareto-optimal configurations as possible: To get all the true optima, RS usually needs to go through the whole search space (fta), and with fixed budget it obtains much fewer Pareto points than other methods (fbp).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6.2.2" |
| }, |
| { |
| "text": "\u2022 BO is generally superior across datasets. On sw-en, it only spends less than half of the time that RS takes to get the Pareto set (344 vs. 719), and can find 8.6 more Pareto points than RS with 200 NMT models evaluated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6.2.2" |
| }, |
| { |
| "text": "\u2022 GB provides comparable performance as BO on four datasets, whereas on sw-en and so-en, BO noticeably outperforms GB, which might not be a perfect solution for a multiobjective task. 19 Budget is adjusted based on the size of search space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6.2.2" |
| }, |
| { |
| "text": "For single-objective optimization, we compare the best BLEU and mean squared error (MSE), which is the averaged squared difference between ground-truth BLEU and predictions, achieved by different HPO methods across time. We can see from Figure 6 (left) that BO and GB converge much faster than RS, and GB is superior over time. This could be partly explained by Figure 6 (right), GB can already fit the data well in the beginning, while BO starts from a much larger MSE and decreases gradually.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 237, |
| "end": 245, |
| "text": "Figure 6", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 362, |
| "end": 370, |
| "text": "Figure 6", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6.2.2" |
| }, |
| { |
| "text": "For multiobjective optimization, we show the evolution of Pareto-optimal fronts in Figure 7 . There is a trend that Pareto fronts are moving towards the lower right corner at each iteration, verifying the effectiveness of our HPO methods.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 83, |
| "end": 91, |
| "text": "Figure 7", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6.2.2" |
| }, |
| { |
| "text": "NMT training might not be deterministic due to the random initialization of model parameters. All the experimental results so far are obtained by a single run using one random seed. In order to explore the variance of the model performance induced by initialization effects, we fix the hyperparameter configurations and train models initialized with various random seeds. Specifically, we select five hyperparameter configurations, 20 and retrained them for additional five times each with different random initializations. We did this for two datasets: the low-resource sw-en task and the larger WMT2019 ja-en task. The results on ja-en and sw-en are shown in Figure 8 . The variance of performance is kept in a small range in most cases and the ranking of configurations remains about the same when different random seeds are applied. Based on this observation, we think that it is a reasonable strategy to use a single run to build table-lookup datasets; but at the same time it should be understood that the BLEU scores in the lookup table are only approximations. We note that there can be a few cases where variance is large, and this might be best addressed by inventing HPO methods that explicitly account for such uncertainty.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 661, |
| "end": 669, |
| "text": "Figure 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Random Initialization", |
| "sec_num": "7.2" |
| }, |
| { |
| "text": "To alleviate the computational burden for benchmarking HPO methods and to improve research Figure 8 : BLEU of ja-en and sw-en models trained with six random seeds. Circles with different colors stand for different random seeds.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 91, |
| "end": 99, |
| "text": "Figure 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "reproducibility, several studies have explored the table-lookup framework. published a mix of datasets focusing on feed forward neural networks. Ying et al. (2019) released a dataset of convolutional architectures for image classification problems. To the best of our knowledge, this work is the first that focuses on NMT and transformer models.", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 163, |
| "text": "Ying et al. (2019)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "One challenge with table-lookup is that sufficient coverage of the hyperparameter grid is assumed. Eggensperger et al. (2015) and propose using a predictive metamodel trained on a table-lookup benchmark to approximate hyperparameters that are not in the table. This is an interesting avenue for future work.", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 125, |
| "text": "Eggensperger et al. (2015)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Studies on HPO for NMT are scarce. Qin et al. 2017propose an evolution strategy-based HPO method for NMT. So et al. (2019) apply NAS to Transformer on NMT tasks. There is also work on empirically exploring hyperparameters and architectures of NMT systems (Bahar et al., 2017; Britz et al., 2017; Lim et al., 2018) , though the focus is on finding general best-practice configurations. This differs from the goal of HPO, which aims to find the best configuration specific to a given dataset.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 122, |
| "text": "So et al. (2019)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 255, |
| "end": 275, |
| "text": "(Bahar et al., 2017;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 276, |
| "end": 295, |
| "text": "Britz et al., 2017;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 296, |
| "end": 313, |
| "text": "Lim et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "In this paper, we presented a benchmark dataset for hyperparameter optimization of neural machine translation systems. We provided multiple evaluation protocols and analysis approaches for comparing HPO methods. We benchmarked Bayesian optimization and a novel graph-based semi-supervised learning method on the dataset for both single-objective and multiobjective optimization. Our hope is that this kind of dataset will facilitate reproducible research and rigorous evaluation of HPO for complex and expensive models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "9" |
| }, |
| { |
| "text": "We focus on SMBO methods in this paper, but note that our dataset is amenable to any HPO method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/awslabs/sockeye.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Same number of BPE operations is used for both sides. 5 Same values are used for encoder and decoder.6 In this paper, we only focused on integer and real-valued hyperparameters. Categorical hyperparameters need special treatment for most HPO algorithms, thus are not considered.7 Note that not all possible hyperparameter configurations are included in the dataset: We excluded ones where training failed or clearly did not learn (e.g., achieved \u2248 0 BLEU).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The ranking is computed only on the subset of MT systems common in all datasets. For this, we consider 30k bpe (for zh, ru, ja, en) to be equivalent to 32k bpe (for sw, so).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For simplicity, we assume a mean of 0 for the prior.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/Este1le/gbopt.15 We choose Mat\u00e9rn52 and RBF kernel because they exhibit different properties and are both frequently used in literature. As shown in Rasmussen (2003), a parameter \u03bd of the Mat\u00e9rn class of covariance functions can affect the smoothness of the functions drawn from GP. For \u03bd = 1/2, the process becomes very rough, and for \u03bd \u2192 \u221e, the covariance function converges to RBF kernel.16 We can make an equivalence between the covariance matrix in multivariate Gaussian distribution and the inverse of a function of the graph Laplacian \u2206 (see Section 5.3 for details), so EI can also be applied to GB models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Except for en-ja, where tolerance is set to 1 BLEU, because BLEU difference between top two models is > 0.5.18 Including three initial evaluations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Four of these are randomly selected. We also include the configuration that achieved the best BLEU in Table 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work is supported in part by an Amazon Research Award and an IARPA MATERIAL grant. We are especially grateful to Michael Denkowski for helpful discussions and feedback throughout the project.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 GB EI R: GB with RBF kernel and EI.\u2022 GB EIF M: GB with Mat\u00e9rn52 kernel and expected influence as acquisition function.\u2022 GB EIF R: GB with RBF and EIF.We use the George library (Ambikasaran et al., 2014) for GP implementation. For all the methods, configurations are sampled without replacement.", |
| "cite_spans": [ |
| { |
| "start": 178, |
| "end": 204, |
| "text": "(Ambikasaran et al., 2014)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| }, |
| { |
| "text": "Results for single-objective optimization are summarized in Table 3 :\u2022 RS always needs to explore roughly half of all the NMT models to get the best one (ftb).\u2022 The effectiveness of BO is confirmed: On swen, BO EI M only takes 10% of the runtime used by RS to achieve the optima.\u2022 For ftb, the best GB outperforms the best BO on four of the six datasets: on en-ja, GB EI M reduces the ftb runtime of BO EI M by 38. GB EIF often works better than GB EI.\u2022 Mat\u00e9rn kernel and RBF kernel are almost equally good for both BO and GB.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 60, |
| "end": 67, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6.1.2" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Fast direct methods for gaussian processes", |
| "authors": [ |
| { |
| "first": "Sivaram", |
| "middle": [], |
| "last": "Ambikasaran", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Foreman-Mackey", |
| "suffix": "" |
| }, |
| { |
| "first": "Leslie", |
| "middle": [], |
| "last": "Greengard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "W" |
| ], |
| "last": "Hogg", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael O'", |
| "middle": [], |
| "last": "Neil", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1403.6015" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sivaram Ambikasaran, Daniel Foreman-Mackey, Leslie Greengard, David W. Hogg, and Michael O'Neil. 2014. Fast direct methods for gaussian processes. arXiv preprint arXiv:1403.6015.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Empirical investigation of optimization algorithms in neural machine translation", |
| "authors": [ |
| { |
| "first": "Parnia", |
| "middle": [], |
| "last": "Bahar", |
| "suffix": "" |
| }, |
| { |
| "first": "Tamer", |
| "middle": [], |
| "last": "Alkhouli", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan-Thorsten", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Jan-Steffen Brix", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "The Prague Bulletin of Mathematical Linguistics", |
| "volume": "108", |
| "issue": "1", |
| "pages": "13--25", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Parnia Bahar, Tamer Alkhouli, Jan-Thorsten Peter, Christopher Jan-Steffen Brix, and Hermann Ney. 2017. Empirical investigation of optimization algorithms in neural machine translation. The Prague Bulletin of Mathemati- cal Linguistics, 108(1):13-25.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. In Pro- ceedings of the 3rd International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Collaborative hyperparameter tuning", |
| "authors": [ |
| { |
| "first": "R\u00e9mi", |
| "middle": [], |
| "last": "Bardenet", |
| "suffix": "" |
| }, |
| { |
| "first": "M\u00e1ty\u00e1s", |
| "middle": [], |
| "last": "Brendel", |
| "suffix": "" |
| }, |
| { |
| "first": "Bal\u00e1zs", |
| "middle": [], |
| "last": "K\u00e9gl", |
| "suffix": "" |
| }, |
| { |
| "first": "Michele", |
| "middle": [], |
| "last": "Sebag", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 30th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R\u00e9mi Bardenet, M\u00e1ty\u00e1s Brendel, Bal\u00e1zs K\u00e9gl, and Michele Sebag. 2013. Collaborative hyperparameter tuning. In Proceedings of the 30th International Conference on Machine Learning.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Speedconstrained tuning for statistical machine translation using Bayesian optimization", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Beck", |
| "suffix": "" |
| }, |
| { |
| "first": "Adri\u00e0", |
| "middle": [], |
| "last": "De Gispert", |
| "suffix": "" |
| }, |
| { |
| "first": "Gonzalo", |
| "middle": [], |
| "last": "Iglesias", |
| "suffix": "" |
| }, |
| { |
| "first": "Aurelien", |
| "middle": [], |
| "last": "Waite", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Byrne", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1604.05073" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Beck, Adri\u00e0 de Gispert, Gonzalo Iglesias, Aurelien Waite, and Bill Byrne. 2016. Speed- constrained tuning for statistical machine translation using Bayesian optimization. arXiv preprint arXiv:1604.05073.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Random search for hyper-parameter optimization", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bergstra", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "13", |
| "issue": "", |
| "pages": "281--305", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Bergstra and Yoshua Bengio. 2012. Random search for hyper-parameter optimiza- tion. Journal of Machine Learning Research, 13(Feb):281-305.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Algorithms for hyper-parameter optimization", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [ |
| "S" |
| ], |
| "last": "Bergstra", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9mi", |
| "middle": [], |
| "last": "Bardenet", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Bal\u00e1zs", |
| "middle": [], |
| "last": "K\u00e9gl", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 25th Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James S. Bergstra, R\u00e9mi Bardenet, Yoshua Bengio, and Bal\u00e1zs K\u00e9gl. 2011. Algorithms for hyper-parameter optimization. In Proceedings of the 25th Advances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Massive exploration of neural machine translation architectures", |
| "authors": [ |
| { |
| "first": "Denny", |
| "middle": [], |
| "last": "Britz", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Goldie", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1703.03906" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Denny Britz, Anna Goldie, Minh-Thang Luong, and Quoc Le. 2017. Massive exploration of neural machine translation architectures. arXiv preprint arXiv:1703.03906.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A tutorial on Bayesian optimization of expensive cost functions, with application to active user modeling and hierarchical reinforcement learning", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Brochu", |
| "suffix": "" |
| }, |
| { |
| "first": "Vlad", |
| "middle": [ |
| "M" |
| ], |
| "last": "Cora", |
| "suffix": "" |
| }, |
| { |
| "first": "Nando De", |
| "middle": [], |
| "last": "Freitas", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1012.2599" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Brochu, Vlad M. Cora, and Nando De Freitas. 2010. A tutorial on Bayesian optimi- zation of expensive cost functions, with application to active user modeling and hierarchical reinforcement learning. arXiv preprint arXiv:1012.2599.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Efficient architecture search by network transformation", |
| "authors": [ |
| { |
| "first": "Han", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianyao", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Weinan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Han Cai, Tianyao Chen, Weinan Zhang, Yong Yu, and Jun Wang. 2018. Efficient architecture search by network transformation. In Thirty- Second AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The multitarget TED talks task", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Duh. 2018. The multitarget TED talks task. http://www.cs.jhu.edu/\u223ckevinduh/ a/multitarget-tedtalks/.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Comparison between genetic algorithms and particle swarm optimization", |
| "authors": [ |
| { |
| "first": "Russell", |
| "middle": [ |
| "C" |
| ], |
| "last": "Eberhart", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuhui", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "International Conference on Evolutionary Programming", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Russell C. Eberhart and Yuhui Shi. 1998. Comparison between genetic algorithms and particle swarm optimization. In International Conference on Evolutionary Programming.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Efficient benchmarking of hyperparameter optimizers via surrogates", |
| "authors": [ |
| { |
| "first": "Katharina", |
| "middle": [], |
| "last": "Eggensperger", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 29th AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katharina Eggensperger, Frank Hutter, Holger Hoos, and Kevin Leyton-Brown. 2015. Effi- cient benchmarking of hyperparameter optimiz- ers via surrogates. In Proceedings of the 29th AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Hypervolumebased expected improvement: Monotonicity properties and exact computation", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [ |
| "T", |
| "M" |
| ], |
| "last": "Emmerich", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "H" |
| ], |
| "last": "Deutz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [ |
| "Willem" |
| ], |
| "last": "Klinkenberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "IEEE Congress of Evolutionary Computation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael T. M. Emmerich, Andr\u00e9 H. Deutz, and Jan Willem Klinkenberg. 2011. Hypervolume- based expected improvement: Monotonicity properties and exact computation. In 2011 IEEE Congress of Evolutionary Computation (CEC).", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Hyperparameter optimization", |
| "authors": [ |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Feurer", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Automated Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "3--33", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthias Feurer and Frank Hutter. 2019. Hyperparameter optimization. In Automated Machine Learning, pages 3-33. Springer.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Initializing Bayesian hyperparameter optimization via meta-learning", |
| "authors": [ |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Feurer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jost", |
| "middle": [ |
| "Tobias" |
| ], |
| "last": "Springenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Twenty-Ninth AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthias Feurer, Jost Tobias Springenberg, and Frank Hutter. 2015. Initializing Bayesian hyperparameter optimization via meta-learning. In Twenty-Ninth AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A tutorial on Bayesian optimization", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [ |
| "I" |
| ], |
| "last": "Frazier", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1807.02811" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter I. Frazier. 2018. A tutorial on Bayesian optimization. arXiv preprint arXiv:1807.02811.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Convolutional sequence to sequence learning", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Gehring", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Denis", |
| "middle": [], |
| "last": "Yarats", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [ |
| "N" |
| ], |
| "last": "Dauphin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning", |
| "volume": "70", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Gehring, Michael Auli, David Grangier, Denis Yarats, and Yann N. Dauphin. 2017. Convolutional sequence to sequence learning. In Proceedings of the 34th International Conference on Machine Learning-Volume 70.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Algorithms and analyses for maximal vector computation. The VLDB Journal-The International Journal on Very Large Data Bases", |
| "authors": [ |
| { |
| "first": "Parke", |
| "middle": [], |
| "last": "Godfrey", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Shipley", |
| "suffix": "" |
| }, |
| { |
| "first": "Jarek", |
| "middle": [], |
| "last": "Gryz", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "16", |
| "issue": "", |
| "pages": "5--28", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Parke Godfrey, Ryan Shipley, and Jarek Gryz. 2007. Algorithms and analyses for maximal vector computation. The VLDB Journal-The International Journal on Very Large Data Bases, 16(1):5-28.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Coco: Performance assessment", |
| "authors": [ |
| { |
| "first": "Nikolaus", |
| "middle": [], |
| "last": "Hansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Anne", |
| "middle": [], |
| "last": "Auger", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimo", |
| "middle": [], |
| "last": "Brockhoff", |
| "suffix": "" |
| }, |
| { |
| "first": "Dejan", |
| "middle": [], |
| "last": "Tu\u0161ar", |
| "suffix": "" |
| }, |
| { |
| "first": "Tea", |
| "middle": [], |
| "last": "Tu\u0161ar", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1605.03560" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikolaus Hansen, Anne Auger, Dimo Brockhoff, Dejan Tu\u0161ar, and Tea Tu\u0161ar. 2016. Coco: Performance assessment. arXiv preprint arXiv:1605.03560.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Sockeye: A toolkit for neural machine translation", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hieber", |
| "suffix": "" |
| }, |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Domhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Denkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Vilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Artem", |
| "middle": [], |
| "last": "Sokolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Clifton", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1712.05690" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Hieber, Tobias Domhan, Michael Denkowski, David Vilar, Artem Sokolov, Ann Clifton, and Matt Post. 2017. Sockeye: A toolkit for neural machine translation. arXiv preprint arXiv:1712.05690.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Sequential model-based optimization for general algorithm configuration", |
| "authors": [ |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [ |
| "H" |
| ], |
| "last": "Hoos", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Leyton-Brown", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 5th International Conference on Learning and Intelligent Optimization", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frank Hutter, Holger H. Hoos, and Kevin Leyton-Brown. 2011. Sequential model-based optimization for general algorithm configura- tion. In Proceedings of the 5th International Conference on Learning and Intelligent Opti- mization.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Efficient global optimization of expensive black-box functions", |
| "authors": [ |
| { |
| "first": "Donald", |
| "middle": [ |
| "R" |
| ], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Schonlau", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "J" |
| ], |
| "last": "Welch", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Journal of Global Optimization", |
| "volume": "13", |
| "issue": "4", |
| "pages": "455--492", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Donald R. Jones, Matthias Schonlau, and William J. Welch. 1998. Efficient global optimization of expensive black-box functions. Journal of Global Optimization, 13(4):455-492.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Metasurrogate benchmarking for hyperparameter optimization", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhenwen", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| }, |
| { |
| "first": "Neil", |
| "middle": [], |
| "last": "Lawrence", |
| "suffix": "" |
| }, |
| { |
| "first": "Javier", |
| "middle": [], |
| "last": "Gonzalez", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1905.12982" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Klein, Zhenwen Dai, Frank Hutter, Neil Lawrence, and Javier Gonzalez. 2019. Meta- surrogate benchmarking for hyperparameter optimization. arXiv preprint arXiv:1905.12982.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Tabular benchmarks for joint architecture and hyperparameter optimization", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1905.04970" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Klein and Frank Hutter. 2019. Tabular benchmarks for joint architecture and hyper- parameter optimization. arXiv preprint arXiv: 1905.04970.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Random search and reproducibility for neural architecture search", |
| "authors": [ |
| { |
| "first": "Liam", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ameet", |
| "middle": [], |
| "last": "Talwalkar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference on Uncertainty in Artificial Intelligence (UAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liam Li and Ameet Talwalkar. 2019. Random search and reproducibility for neural architec- ture search. In Proceedings of the Conference on Uncertainty in Artificial Intelligence (UAI).", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Findings of the first shared task on machine translation robustness", |
| "authors": [ |
| { |
| "first": "Xian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonios", |
| "middle": [], |
| "last": "Anastasopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Pino", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xian Li, Paul Michel, Antonios Anastasopoulos, Yonatan Belinkov, Nadir Durrani, Orhan Firat, Philipp Koehn, Graham Neubig, Juan Pino, and Hassan Sajjad. 2019. Findings of the first shared task on machine translation robustness. In Proceedings of the Fourth Conference on Machine Translation.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Exploring hyper-parameter optimization for neural machine translation on gpu architectures", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Lim", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Briers", |
| "suffix": "" |
| }, |
| { |
| "first": "Allen", |
| "middle": [], |
| "last": "Malony", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1805.02094" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Lim, Kenneth Heafield, Hieu Hoang, Mark Briers, and Allen Malony. 2018. Explor- ing hyper-parameter optimization for neural machine translation on gpu architectures. arXiv preprint arXiv:1805.02094.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Progressive neural architecture search", |
| "authors": [ |
| { |
| "first": "Chenxi", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Barret", |
| "middle": [], |
| "last": "Zoph", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathon", |
| "middle": [], |
| "last": "Shlens", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Hua", |
| "suffix": "" |
| }, |
| { |
| "first": "Li-Jia", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Yuille", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Murphy", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the European Conference on Computer Vision (ECCV)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chenxi Liu, Barret Zoph, Maxim Neumann, Jonathon Shlens, Wei Hua, Li-Jia Li, Li Fei-Fei, Alan Yuille, Jonathan Huang, and Kevin Murphy. 2018a. Progressive neural architecture search. In Proceedings of the European Conference on Computer Vision (ECCV).", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Hierarchical representations for efficient architecture search", |
| "authors": [ |
| { |
| "first": "Hanxiao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Simonyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Chrisantha", |
| "middle": [], |
| "last": "Fernando", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hanxiao Liu, Karen Simonyan, Oriol Vinyals, Chrisantha Fernando, and Koray Kavukcuoglu. 2018b. Hierarchical representations for efficient architecture search. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "A review of automatic selection methods for machine learning algorithms and hyper-parameter values", |
| "authors": [ |
| { |
| "first": "Gang", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Network Modeling Analysis in Health Informatics and Bioinformatics", |
| "volume": "5", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gang Luo. 2016. A review of automatic selection methods for machine learning algorithms and hyper-parameter values. Network Modeling Analysis in Health Informatics and Bioinfor- matics, 5(1):18.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Bayesian optimisation for machine translation", |
| "authors": [ |
| { |
| "first": "Yishu", |
| "middle": [], |
| "last": "Miao", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziyu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.7180" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yishu Miao, Ziyu Wang, and Phil Blunsom. 2014. Bayesian optimisation for machine translation. arXiv preprint arXiv:1412.7180.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting on Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Multiobjective optimization using gaussian process emulators via stepwise uncertainty reduction", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Picheny", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Statistics and Computing", |
| "volume": "25", |
| "issue": "6", |
| "pages": "1265--1280", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Picheny. 2015. Multiobjective optimization using gaussian process emulators via stepwise uncertainty reduction. Statistics and Comput- ing, 25(6):1265-1280.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Multiobjective optimization on a limited budget of evaluations using model-assisted S-metric selection", |
| "authors": [ |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Ponweiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Wagner", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Biermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Vincze", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "International Conference on Parallel Problem Solving from Nature", |
| "volume": "", |
| "issue": "", |
| "pages": "784--794", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wolfgang Ponweiser, Tobias Wagner, Dirk Biermann, and Markus Vincze. 2008. Multi- objective optimization on a limited budget of evaluations using model-assisted S-metric selection. In International Conference on Parallel Problem Solving from Nature, pages 784-794. Springer.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Evolution strategy based automatic tuning of neural machine translation systems", |
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Takahiro", |
| "middle": [], |
| "last": "Shinozaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 14th International Workshop on Spoken Language Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Qin, Takahiro Shinozaki, and Kevin Duh. 2017. Evolution strategy based automatic tuning of neural machine translation systems. In Proceedings of the 14th International Workshop on Spoken Language Translation.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Gaussian processes in machine learning", |
| "authors": [ |
| { |
| "first": "Carl", |
| "middle": [ |
| "Edward" |
| ], |
| "last": "Rasmussen", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Summer School on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "63--71", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carl Edward Rasmussen. 2003. Gaussian processes in machine learning. In Summer School on Machine Learning, pages 63-71. Springer.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Regularized evolution for image classifier architecture search", |
| "authors": [ |
| { |
| "first": "Esteban", |
| "middle": [], |
| "last": "Real", |
| "suffix": "" |
| }, |
| { |
| "first": "Alok", |
| "middle": [], |
| "last": "Aggarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanping", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Esteban Real, Alok Aggarwal, Yanping Huang, and Quoc V. Le. 2019. Regularized evolution for image classifier architecture search. In Proceedings of the AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Global versus local search in constrained optimization of computer models", |
| "authors": [ |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Schonlau", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "J" |
| ], |
| "last": "Welch", |
| "suffix": "" |
| }, |
| { |
| "first": "Donald", |
| "middle": [ |
| "R" |
| ], |
| "last": "Jones", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Lecture Notes-Monograph Series", |
| "volume": "", |
| "issue": "", |
| "pages": "11--25", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthias Schonlau, William J. Welch, and Donald R. Jones. 1998. Global versus local search in constrained optimization of computer models. Lecture Notes-Monograph Series pages 11-25.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Pareto frontier learning with expensive correlated objectives", |
| "authors": [ |
| { |
| "first": "Amar", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Zoubin", |
| "middle": [], |
| "last": "Ghahramani", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1919--1927", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amar Shah and Zoubin Ghahramani. 2016. Pareto frontier learning with expensive correlated objectives. In International Conference on Machine Learning, pages 1919-1927.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Taking the human out of the loop: A review of Bayesian optimization", |
| "authors": [ |
| { |
| "first": "Bobak", |
| "middle": [], |
| "last": "Shahriari", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Swersky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziyu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [ |
| "P" |
| ], |
| "last": "Adams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nando", |
| "middle": [], |
| "last": "De Freitas", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE", |
| "volume": "104", |
| "issue": "1", |
| "pages": "148--175", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bobak Shahriari, Kevin Swersky, Ziyu Wang, Ryan P. Adams, and Nando De Freitas. 2015. Taking the human out of the loop: A review of Bayesian optimization. Proceedings of the IEEE, 104(1):148-175.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Evolutionary optimization algorithms", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Simon", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Simon. 2013. Evolutionary optimization algorithms. John Wiley & Sons.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Scalable Bayesian optimization using deep neural networks", |
| "authors": [ |
| { |
| "first": "Jasper", |
| "middle": [], |
| "last": "Snoek", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Rippel", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Swersky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadathur", |
| "middle": [], |
| "last": "Satish", |
| "suffix": "" |
| }, |
| { |
| "first": "Narayanan", |
| "middle": [], |
| "last": "Sundaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Mostofa", |
| "middle": [], |
| "last": "Patwary", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "R" |
| ], |
| "last": "Prabhat", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Adams", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 32nd International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jasper Snoek, Oren Rippel, Kevin Swersky, Ryan Kiros, Nadathur Satish, Narayanan Sundaram, Mostofa Patwary, M.R. Prabhat, and Ryan Adams. 2015. Scalable Bayesian optimization using deep neural networks. In Proceedings of the 32nd International Conference on Machine Learning.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "The evolved transformer", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "So", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 36th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David So, Quoc Le, and Chen Liang. 2019. The evolved transformer. In Proceedings of the 36th International Conference on Machine Learning.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 28th Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural networks. In Proceedings of the 28th Advances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Multiobjective optimization of expensive-to-evaluate deterministic computer simulator models", |
| "authors": [ |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Svenson", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Santner", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Computational Statistics & Data Analysis", |
| "volume": "94", |
| "issue": "", |
| "pages": "250--264", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joshua Svenson and Thomas Santner. 2016. Multiobjective optimization of expensive- to-evaluate deterministic computer simulator models. Computational Statistics & Data Analysis, 94:250-264.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 31st Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of the 31st Advances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Nas-bench-101: Towards reproducible neural architecture search", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Ying", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Esteban", |
| "middle": [], |
| "last": "Real", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Christiansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Murphy", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1902.09635" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Ying, Aaron Klein, Esteban Real, Eric Christiansen, Kevin Murphy, and Frank Hutter. 2019. Nas-bench-101: Towards reproducible neural architecture search. arXiv preprint arXiv:1902.09635.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Semi-supervised learning with graphs", |
| "authors": [ |
| { |
| "first": "Xiaojin", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaojin Zhu. 2005. Semi-supervised learning with graphs. Ph.D. Thesis.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Semi-supervised learning using gaussian fields and harmonic functions", |
| "authors": [ |
| { |
| "first": "Xiaojin", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zoubin", |
| "middle": [], |
| "last": "Ghahramani", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Lafferty", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 20th International Conference on Machine Learning (ICML-03)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaojin Zhu, Zoubin Ghahramani, and John D. Lafferty. 2003. Semi-supervised learning using gaussian fields and harmonic functions. In Proceedings of the 20th International Conference on Machine Learning (ICML-03).", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Multiobjective optimization using evolutionary algorithms-a comparative case study", |
| "authors": [ |
| { |
| "first": "Eckart", |
| "middle": [], |
| "last": "Zitzler", |
| "suffix": "" |
| }, |
| { |
| "first": "Lothar", |
| "middle": [], |
| "last": "Thiele", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "International Conference on Parallel Problem Solving from Nature", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eckart Zitzler and Lothar Thiele. 1998. Multiobjective optimization using evolutionary algorithms-a comparative case study. In International Conference on Parallel Problem Solving from Nature.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Neural architecture search with reinforcement learning", |
| "authors": [ |
| { |
| "first": "Barret", |
| "middle": [], |
| "last": "Zoph", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1611.01578" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barret Zoph and Quoc V. Le. 2016. Neural architecture search with reinforcement learning. arXiv preprint arXiv:1611.01578.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "text": "The workflow of HPO algorithm selection/development. HPO algorithm candidates are first evaluated on lookup tables built from multiple MT datasets.", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "text": "Correlation of hyperparameter rankings across MT datasets. 8", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "text": "The importance of each hyperparameter (top) and the eight most important hyperparameter pairs (bottom) for top 1% (left), and all NMT models ranked by BLEU on en-ja (middle) and sw-en (right).", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "text": "They comment: ''Of the 12 papers published since 2018 at NeurIPS, ICML, and ICLR that introduce novel Neural Architecture Search methods, none are exactly reproducible.''", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "text": "Left: Best BLEU found by different HPO methods over time on ja-en NMT models. Right: Mean squared error achieved by different HPO methods over time on ja-en NMT models. We plot the median and the 25th and 75th quantile across 100 independent runs.", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF5": { |
| "uris": null, |
| "text": "Pareto-front approximation during multiobjective optimization using BO M and GB M on ru-en. ''Step'' is the number of evaluated MT models. Gray circles form the Pareto set of initial seeds. In this example, all three initial seeds happen to be Pareto points. Gold stars are the Pareto solutions of the dataset. Lower-right corner is better.", |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF0": { |
| "text": "represents a reasonable set of values used in the literature. Nevertheless, it should be clarified that empirical findings from table-lookup datasets should be interpreted in light of the limits of hyperparameter range and granularity.", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>dataset</td><td>bpe (1k)</td><td>#layers</td><td>#embed</td><td>#hidden</td><td colspan=\"2\">#att heads init lr (10 \u22124 )</td></tr><tr><td>zh, ru, ja, en</td><td>10, 30, 50</td><td colspan=\"3\">2, 4 256, 512, 1024 1024, 2048</td><td>8, 16</td><td>3, 6, 10</td></tr><tr><td>sw</td><td colspan=\"4\">1, 2, 4, 8, 16, 32 1, 2, 4, 6 256, 512, 1024 1024, 2048</td><td>8, 16</td><td>3, 6, 10</td></tr><tr><td>so</td><td>1, 2, 4, 8, 16, 32</td><td colspan=\"3\">1, 2, 4 256, 512, 1024 1024, 2048</td><td>8, 16</td><td>3, 6, 10</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "text": "Hyperparameter search space for the NMT systems.", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td colspan=\"9\">Dataset #models Best BLEU bpe #layers #embed #hidden #att heads init lr</td></tr><tr><td>zh-en</td><td>118</td><td>14.66</td><td>30k</td><td>4</td><td>512</td><td>1024</td><td>16</td><td>3e-4</td></tr><tr><td>ru-en</td><td>176</td><td>20.23</td><td>10k</td><td>4</td><td>256</td><td>2048</td><td>8</td><td>3e-4</td></tr><tr><td>ja-en</td><td>150</td><td>16.41</td><td>30k</td><td>4</td><td>512</td><td>2048</td><td>8</td><td>3e-4</td></tr><tr><td>en-ja</td><td>168</td><td>20.74</td><td>10k</td><td>4</td><td>1024</td><td>2048</td><td>8</td><td>3e-4</td></tr><tr><td>sw-en</td><td>767</td><td>26.09</td><td>1k</td><td>2</td><td>256</td><td>1024</td><td>8</td><td>6e-4</td></tr><tr><td>so-en</td><td>604</td><td>11.23</td><td>8k</td><td>2</td><td>512</td><td>1024</td><td>8</td><td>3e-4</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "text": "Evaluation on NMT models trained with different language pairs for multiobjective (BLEU & decoding time) optimization. Fixed-target one (fto) and fixed-target all (fta) are measured by number of model evaluations, and fixed-budget (fbp) is measured by number of Pareto-optimal points. J is the size of the true Pareto set and B is the runtime budget.19", |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |