| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:30:52.263378Z" |
| }, |
| "title": "DACT-BERT: Differentiable Adaptive Computation Time for an Efficient BERT Inference", |
| "authors": [ |
| { |
| "first": "Crist\u00f3bal", |
| "middle": [], |
| "last": "Eyzaguirre", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pontificia Universidad Cat\u00f3lica de Chile", |
| "location": { |
| "postCode": "2 KU", |
| "settlement": "Leuven" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Del R\u00edo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pontificia Universidad Cat\u00f3lica de Chile", |
| "location": { |
| "postCode": "2 KU", |
| "settlement": "Leuven" |
| } |
| }, |
| "email": "fidelrio@uc.cl" |
| }, |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Araujo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pontificia Universidad Cat\u00f3lica de Chile", |
| "location": { |
| "postCode": "2 KU", |
| "settlement": "Leuven" |
| } |
| }, |
| "email": "vgaraujo@uc.cl" |
| }, |
| { |
| "first": "Alvaro", |
| "middle": [], |
| "last": "Soto", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pontificia Universidad Cat\u00f3lica de Chile", |
| "location": { |
| "postCode": "2 KU", |
| "settlement": "Leuven" |
| } |
| }, |
| "email": "asoto@ing.puc.cl" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Large-scale pre-trained language models have shown remarkable results in diverse NLP applications. However, these performance gains have been accompanied by a significant increase in computation time and model size, stressing the need to develop new or complementary strategies to increase the efficiency of these models. This paper proposes DACT-BERT, a differentiable adaptive computation time strategy for BERT-like models. DACT-BERT adds an adaptive computational mechanism to BERT's regular processing pipeline, which controls the number of Transformer blocks that need to be executed at inference time. By doing this, the model learns to combine the most appropriate intermediate representations for the task at hand. Our experiments demonstrate that our approach, when compared to the baselines, excels on a reduced computational regime and is competitive in other less restrictive ones. Code available at https://github.com/ceyzaguirre4/dact_bert.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Large-scale pre-trained language models have shown remarkable results in diverse NLP applications. However, these performance gains have been accompanied by a significant increase in computation time and model size, stressing the need to develop new or complementary strategies to increase the efficiency of these models. This paper proposes DACT-BERT, a differentiable adaptive computation time strategy for BERT-like models. DACT-BERT adds an adaptive computational mechanism to BERT's regular processing pipeline, which controls the number of Transformer blocks that need to be executed at inference time. By doing this, the model learns to combine the most appropriate intermediate representations for the task at hand. Our experiments demonstrate that our approach, when compared to the baselines, excels on a reduced computational regime and is competitive in other less restrictive ones. Code available at https://github.com/ceyzaguirre4/dact_bert.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The use of pre-trained language models based on large-scale Transformers (Vaswani et al., 2017) has gained popularity after the release of BERT (Devlin et al., 2019) . The usual pipeline consists of finetuning BERT by adapting and retraining its classification head to meet the requirements of a specific NLP task. Unfortunately, the benefits of using a powerful model are also accompanied by a highly demanding computational load. In effect, current pre-trained language models such as BERT have millions of parameters, making them computationally intensive both during training and inference.", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 95, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 144, |
| "end": 165, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While high accuracy is usually the ultimate goal, computational efficiency is also desirable. The use of a demanding model not only causes longer processing times and limits applicability to lowend devices, but it also has major implications * Work done at Pontificia Universidad Cat\u00f3lica de Chile. in terms of the environmental impact of AI technologies (Schwartz et al., 2020) . As an example, Strubell et al. (2019) provides an estimation of the carbon footprint of several large NLP models, including BERT, concluding that they are becoming unfriendly to the environment.", |
| "cite_spans": [ |
| { |
| "start": 355, |
| "end": 378, |
| "text": "(Schwartz et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 396, |
| "end": 418, |
| "text": "Strubell et al. (2019)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recent works have shown that behind BERT's immense capacity, there is considerable redundancy and over-parametrization (Kovaleva et al., 2019; Rogers et al., 2020) . Consequently, others works have explored strategies to develop efficient and compact versions of BERT. One such strategy known as dynamic Transformers consists of providing BERT with an adaptive mechanism to control how many Transformers blocks are used .", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 142, |
| "text": "(Kovaleva et al., 2019;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 143, |
| "end": 163, |
| "text": "Rogers et al., 2020)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we present DACT-BERT, an alternative to current dynamic Transformers that uses an Adaptive Computation Time (ACT) mechanism (Graves, 2016) to control the complexity of the processing pipeline of BERT. This mechanism controls the number of Transformer blocks executed at inference time by using additional classifiers. This allows resulting models to take advantage of the information already encoded in intermediate layers without the need to run all layers. Specifically, our model integrates DACT, a fully differentiable variant of the adaptive computation module (Eyzaguirre and Soto, 2020) that allows us to train a halting neuron after each Transformer block. This neuron indicates the confidence the model has on returning the correct answer after executing said block. We use the DACT algorithm to determine when the answer stabilizes in a given output using the halting neuron and halt once it is sure running more blocks cannot change the output.", |
| "cite_spans": [ |
| { |
| "start": 139, |
| "end": 153, |
| "text": "(Graves, 2016)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 581, |
| "end": 608, |
| "text": "(Eyzaguirre and Soto, 2020)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Several architectures have been designed to avoid overcomputing in Transformer-based models. According to , there are two groups. into an accumulated answer a n . Later, during inference, the model is halted once a n \u2248 a N .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "One such strategy is to use lightweight architectures that are trained from scratch. As an example, ALBERT (Lan et al., 2020) and Universal Transformer (Dehghani et al., 2019) propose cross-layer parameter sharing as a way to improve model efficiency. The latter also includes an ACT-based (Graves, 2016) halting mechanism that is not fully differentiable as DACT-BERT is.", |
| "cite_spans": [ |
| { |
| "start": 107, |
| "end": 125, |
| "text": "(Lan et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 152, |
| "end": 175, |
| "text": "(Dehghani et al., 2019)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 290, |
| "end": 304, |
| "text": "(Graves, 2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Static Efficient Transformers", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "A second strategy is to distill the knowledge of pretrained models into a more compact \"student\". Models such as PKD-BERT (Sun et al., 2019) , TinyBERT (Jiao et al., 2020) , and Distil-BERT compress the knowledge of large models, the \"teachers\", into more compact or efficient ones to obtain similar performance at a reduced computation or memory cost. While these approaches effectively reduce the total calculation needed to execute the model, they are limited in the same way as BERT, they do not take into account that some examples could be less complicated than others and use the same amount of computation.", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 140, |
| "text": "(Sun et al., 2019)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 152, |
| "end": 171, |
| "text": "(Jiao et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Static Efficient Transformers", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Recently, a series of algorithms have been proposed to reduce computation in Transformer language models based on early exiting (Kaya et al., 2019; Han et al., 2021) . Models such as DeeBERT , FastBert , PABEE , and Depths Transformers (Elbayad et al., 2020) introduce intermediate classifiers after each Transformer block. At inference, a \"halting criterion\" is used to dynamically determine the number of blocks needed to perform a specific prediction. Instead of using a confidence approach (Guo et al., 2017) to determine when to stop, recent approaches rely on computing a particular heuristic (such as Shannon's entropy or Mutual Information) Liu et al., 2021) , an agreement between intermediate classifiers , a trained confidence predictor (Xin et al., 2021) , or directly the amount of steps based on an heuristic based training (Elbayad et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 147, |
| "text": "(Kaya et al., 2019;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 148, |
| "end": 165, |
| "text": "Han et al., 2021)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 494, |
| "end": 512, |
| "text": "(Guo et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 649, |
| "end": 666, |
| "text": "Liu et al., 2021)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 748, |
| "end": 766, |
| "text": "(Xin et al., 2021)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 838, |
| "end": 860, |
| "text": "(Elbayad et al., 2020)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dynamic Transformers", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Unlike previous works that use heuristic proxies of models confidence to decide when to halt, DACT-BERT is based on a learning scheme that induces the model to halt when it predicts that its current answer will not change with further processing. As an illustrative example consider a difficult input. Here, our model could \"understand\" that further processing steps are superfluous and decide to stop early, even if its current answer has a low confidence. On the other hand, existing early stopping models would keep wasting computation because their confidence is low.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dynamic Transformers", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Computation Time for BERT Dynamic early stopping methods use a proxy of model confidence to decide when it is safe to cut computation. In this work our signaling module, DACT, approximates this gating mechanism with a soft variant that allows our model to independently learn the confidence function. This mechanism can then be used to detect when stable results are obtained, allowing for the reduction of the total number of steps necessary for a given prediction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DACT-BERT: Differentiable Adaptive", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The original formulation of DACT (Eyzaguirre and Soto, 2020) applies this module to recurrent models. In our case, we adapt the formulation to the case of Transformer based architectures, mainly BERT.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 60, |
| "text": "(Eyzaguirre and Soto, 2020)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DACT-BERT: Differentiable Adaptive", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As shown in Figure 1 and detailed in Algorithm 1, DACT-BERT introduces additional linear layers after each computational unit, similar to the offramps in DeeBERT or the student classifiers in the work of . However, differently from previous approaches, each n-th DACT module also computes an scalar confidence score, or halting value h n , in addition to the output vector y n . Following Devlin et al. (2019) , both, y n and h n , are estimated by using the classification token ([CLS]) that is included in BERT as part of the output representation of each layer.", |
| "cite_spans": [ |
| { |
| "start": 389, |
| "end": 409, |
| "text": "Devlin et al. (2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method Description", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "During training, all the output vectors and halting values are accumulated to obtain a n i.e., encoding the model's best guess after unrolling n Transformer layers. It is combined using the final predicted probabilities p n , allowing it to be rewritten as the weighted average of all intermediate outputs y n multiplied by a function of the confidences of earlier blocks. Line 8 shows how the output vectors are combined using a function of the halting values, to obtain the final predicted probabilities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Description", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The model output is built inductively by using a monotonically decreasing function of the model confidence, p n , to interpolate between the current step's answer and the result of the same operation from the previous step. We then train the model to reduce the classification loss of the final output with a regularizer that induces a bias towards reduced computation. Unlike the regularizer used by Eyzaguirre and Soto (2020), we use:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Description", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "L(x, y) = L(x, y) + \u03c4 n i=1 h i (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Description", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where \u03c4 is a hyper-parameter used to moderate the trade-off between complexity and error. We find empirically that our changes help convergence and further binarize the halting probabilities. Notably, the formulation is end-to-end differentiable. This allows to fine-tune the weights of the underlying backbone, i.e. the Transformer and embedding layers, using a joint optimization with the process that trains the intermediate classifiers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Description", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Algorithm 1 DACT Input: M model with N blocks Input: is_training \u2208 {True, False} 1: p n \u2190 1 2: a n \u2190 \u20d7 0 3: for step n = 1, 2, . . . N do", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Description", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "# Get output and confidence 5:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4:", |
| "sec_num": null |
| }, |
| { |
| "text": "y n \u2190 GetOutputM odule(M, n) 6: h n \u2190 GetHaltM odule(M, n) 7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4:", |
| "sec_num": null |
| }, |
| { |
| "text": "# Combine with previous outputs 8:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4:", |
| "sec_num": null |
| }, |
| { |
| "text": "a n \u2190 (y n * p n\u22121 ) + (a n * (1 \u2212 p n\u22121 )) 9:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4:", |
| "sec_num": null |
| }, |
| { |
| "text": "# Update halting probability 10: end if 17: end for Output: Approximate final answer a n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4:", |
| "sec_num": null |
| }, |
| { |
| "text": "p n \u2190 p n\u22121 *", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4:", |
| "sec_num": null |
| }, |
| { |
| "text": "The inductive formulation of a n lends itself to calculating upper and lower bounds on the probabilities of the output classes. At inference, execution halts once the predicted probabilities for the topclass c * in a n after running all N \u2212 n remaining steps is still higher than the highest value for the runner-up class c ru , and by extension of any other class, then halting doesn't change the output:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dynamic Computation at Inference", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Pr(c * , n)(1 \u2212 p n ) N \u2212n \u2265 Pr(c ru , n) + p n (N \u2212 n)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dynamic Computation at Inference", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(2) That is, the model stops executing additional blocks once it finds that doing so will not change the class with maximum probability in the output because the difference between the top class and the rest is insurmountable. Therefore, the halting condition remains the same as the original DACT formulation (Eyzaguirre and Soto, 2020) .", |
| "cite_spans": [ |
| { |
| "start": 310, |
| "end": 337, |
| "text": "(Eyzaguirre and Soto, 2020)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dynamic Computation at Inference", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The training of the module follows a two step process. First, the underlying Transformer model must be tuned to the objective task. This ensures a good starting point onto which the DACT module can then be adapted to and speeding up convergence. This is followed by a second fine-tuning phase where the complete model is jointly trained for the task. This differs slightly from existing dy-namic Transformer methods, which first pre-train the backbone and then freeze it to modify only the classifier weights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We tested our method using both BERT and RoBERTa backbones, evaluating both models on six different tasks from the GLUE benchmark (Wang et al., 2018) . We use DeeBERT and PABEE as our dynamic baselines, using the same backbones for a fair comparison, and the respective non-adaptive backbones along with DistilBERT as static baselines.", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 149, |
| "text": "(Wang et al., 2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Our model was developed using PyTorch (Paszke et al., 2017) on top of the implementations released by and , as well as the HuggingFace Transformers library . Because the focus of this paper was to introduce an alternative architecture of dynamic Transformers and not achieve state of the art results we use the default parameters and architectures from the Transformers library.", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 59, |
| "text": "(Paszke et al., 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Both DeeBERT and DACT-BERT experiments were repeated three times to obtain the confidence intervals (95% confidence) shown in Figure 2 , each time using a different random initialization for the weights in the auxiliary classifiers 1 . Results for FastBERT are not reported since both DeeBERT and FastBERT use the same entropy-threshold halting criterion.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 126, |
| "end": 134, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Each experiment was run using a single 11GB NVIDIA graphics accelerator, which allows for training on the complete batch using 32-bit precision and without needing gradient accumulation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To compare the trade-off that exists between computation efficiency and the performances obtained with it, we computed efficiency-performance diagrams for the validation set. Efficiency was measured as the proportion of Transformer layers used compared to the total number of layers in their static counterparts. The specific metrics for performance are those suggested in the GLUE paper (Wang et al., 2018) for each task.", |
| "cite_spans": [ |
| { |
| "start": 388, |
| "end": 407, |
| "text": "(Wang et al., 2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computational Efficiency", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In our experiments we fine-tune the backbone model for the GLUE tasks using the default values of the hyper-parameters. For the second stage we vary the value of \u03c4 in Equation 1to compute our computation-performance diagram curves, selecting from a set of fixed values for all the experiments: \u03c4 \u2208 {5 \u2022 10 \u22125 , 5 \u2022 10 \u22124 , 5 \u2022 10 \u22123 , 5 \u2022 10 \u22122 , 5 \u2022 10 \u22121 }. By modifying this hyperparameter in DACT we can manage the amount of computation the model will perform and record the performance it managed to achieve at this level.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computational Efficiency", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Similarly, using DeeBERT to create the computation-performance diagrams the entropy threshold was varied continuously in increments of 0.05. For PaBEE we fluctuate the patience value between 1 and 12, effectively trying out the full range. The results for the unmodified static backbones are also included as a reference, as are the results obtained by the half-depth DistilBERT pretrained model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computational Efficiency", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The area under the curve (AUC) in the Performance vs. Efficiency plot shown in Figure 2 shows our approach improves the trade-off between precision and computation. As was to be expected, all models perform similarly when saving little computation as they replicate the results achieved by the non-adaptive BERT backbone that performs a similar number of steps. On the other hand, when using limited amounts of computation our model outperforms the alternatives in almost every task, especially in tasks for with more training data available. We attribute this advantage in trading off computation and performance to fine-tuning of the backbone weights for reduced computation. Intuitively, as we move away from the 12 step regime for which the underlying static model was trained, more modification of the weights is required. Recall that of all the Dynamic Transformer algorithms only DACT-BERT can modify the Transformer weights because of its full-differentiability.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 79, |
| "end": 87, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Computational Efficiency", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Importantly, because our model learns to regulate itself, it shows remarkable stability in the amount of calculation saved. As the same values of ponder penalties give rise to similar efficiency outputs. By contrast, DeeBERT proves to be highly sensitive to the chosen value for the entropy hyperparameter. The robustness of our model appears to come from learning the efficiency mechanism rather than relying on a somewhat arbitrary heuristic for its control.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computational Efficiency", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In addition, we find our model uses less lay- Figure 2 : Performance vs efficiency trade-offs for BERT-base and RoBERTa-base models using DACT-BERT (blue), DeeBERT (orange) and PaBEE (green). DACT-BERT and DeeBERT experiments were repeated three times for each hyper-parameter. Individual runs are shown with colored dots, and the average along with its confidence interval is shown using a band. In all figures the x-axis shows computation measured as the fraction of the layers used by the respective static backbone (shown as a black diamond). DistilBERT's relative perfomance is shown at the 50% computation mark using a black star. ers compared to DeeBERT (see example at Fig. 3 ), allowing us to prune the final layers. We explain this difference by noting that the entropy will remain high throughout the whole model for the case of difficult questions as it will be uncertain about the answer. On the other hand, any layer in DACT-BERT is capable of quitting computation if it believes future layers cannot answer with more certainty than its own, regardless of how certain the model actually is.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 46, |
| "end": 54, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 677, |
| "end": 684, |
| "text": "Fig. 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Computational Efficiency", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "This work explored the value of using the DACT algorithm with pre-trained Transformer architectures. This results in a fully differentiable model that explicitly learns how many Transformers blocks it needs to perform a specific task. Our results show that our approach, DACT-BERT, outperforms the current dynamic Transformer architectures in several tasks when significantly reducing computation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The random seeds were saved and will be published along with the code to facilitate replicating the results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was partially funded by the Centro Nacional de Inteligencia Artificial CENIA, FB210017, BASAL, ANID.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Universal transformers", |
| "authors": [ |
| { |
| "first": "Mostafa", |
| "middle": [], |
| "last": "Dehghani", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Gouws", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mostafa Dehghani, Stephan Gouws, Oriol Vinyals, Jakob Uszkoreit, and Lukasz Kaiser. 2019. Universal transformers. In International Conference on Learn- ing Representations.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Depth-adaptive transformer", |
| "authors": [ |
| { |
| "first": "Maha", |
| "middle": [], |
| "last": "Elbayad", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiatao", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maha Elbayad, Jiatao Gu, Edouard Grave, and Michael Auli. 2020. Depth-adaptive transformer. In Interna- tional Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Differentiable adaptive computation time for visual reasoning", |
| "authors": [ |
| { |
| "first": "Cristobal", |
| "middle": [], |
| "last": "Eyzaguirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Alvaro", |
| "middle": [], |
| "last": "Soto", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "12817--12825", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cristobal Eyzaguirre and Alvaro Soto. 2020. Differen- tiable adaptive computation time for visual reason- ing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12817-12825.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Adaptive computation time for recurrent neural networks", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Graves. 2016. Adaptive computation time for recur- rent neural networks. ArXiv, abs/1603.08983.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "On calibration of modern neural networks", |
| "authors": [ |
| { |
| "first": "Chuan", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoff", |
| "middle": [], |
| "last": "Pleiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [ |
| "Q" |
| ], |
| "last": "Weinberger", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning", |
| "volume": "70", |
| "issue": "", |
| "pages": "1321--1330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Wein- berger. 2017. On calibration of modern neural net- works. In Proceedings of the 34th International Con- ference on Machine Learning, volume 70 of Pro- ceedings of Machine Learning Research, pages 1321- 1330. PMLR.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Dynamic neural networks: A survey", |
| "authors": [ |
| { |
| "first": "Yizeng", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Gao", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiji", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Le", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Honghui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yulin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yizeng Han, Gao Huang, Shiji Song, Le Yang, Honghui Wang, and Yulin Wang. 2021. Dynamic neural net- works: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "TinyBERT: Distilling BERT for natural language understanding", |
| "authors": [ |
| { |
| "first": "Xiaoqi", |
| "middle": [], |
| "last": "Jiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yichun", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| }, |
| { |
| "first": "Lifeng", |
| "middle": [], |
| "last": "Shang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Fang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.findings-emnlp.372" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaoqi Jiao, Yichun Yin, Lifeng Shang, Xin Jiang, Xiao Chen, Linlin Li, Fang Wang, and Qun Liu. 2020. TinyBERT: Distilling BERT for natural language un- derstanding. In Findings of the Association for Com- putational Linguistics: EMNLP 2020. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Shallow-deep networks: Understanding and mitigating network overthinking", |
| "authors": [ |
| { |
| "first": "Yigitcan", |
| "middle": [], |
| "last": "Kaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanghyun", |
| "middle": [], |
| "last": "Hong", |
| "suffix": "" |
| }, |
| { |
| "first": "Tudor", |
| "middle": [], |
| "last": "Dumitras", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "3301--3310", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yigitcan Kaya, Sanghyun Hong, and Tudor Dumitras. 2019. Shallow-deep networks: Understanding and mitigating network overthinking. In International Conference on Machine Learning, pages 3301-3310. PMLR.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Revealing the dark secrets of BERT", |
| "authors": [ |
| { |
| "first": "Olga", |
| "middle": [], |
| "last": "Kovaleva", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Romanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4365--4374", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1445" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Olga Kovaleva, Alexey Romanov, Anna Rogers, and Anna Rumshisky. 2019. Revealing the dark secrets of BERT. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 4365-4374, Hong Kong, China. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Albert: A lite bert for self-supervised learning of language representations", |
| "authors": [ |
| { |
| "first": "Zhenzhong", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2020. Albert: A lite bert for self-supervised learning of language representations. In International Confer- ence on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "FastBERT: a selfdistilling BERT with adaptive inference time", |
| "authors": [ |
| { |
| "first": "Weijie", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiruo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhe", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Haotang", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Ju", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6035--6044", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.537" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Weijie Liu, Peng Zhou, Zhiruo Wang, Zhe Zhao, Haotang Deng, and Qi Ju. 2020. FastBERT: a self- distilling BERT with adaptive inference time. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 6035- 6044, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Faster depth-adaptive transformers", |
| "authors": [ |
| { |
| "first": "Yijin", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Fandong", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yufeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yijin Liu, Fandong Meng, Jie Zhou, Yufeng Chen, and Jinan Xu. 2021. Faster depth-adaptive transformers.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Automatic differentiation in pytorch", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Paszke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Soumith", |
| "middle": [], |
| "last": "Chintala", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Chanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zachary", |
| "middle": [], |
| "last": "Devito", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeming", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Alban", |
| "middle": [], |
| "last": "Desmaison", |
| "suffix": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Antiga", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lerer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. 2017. Automatic differentiation in pytorch.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A primer in BERTology: What we know about how BERT works", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Olga", |
| "middle": [], |
| "last": "Kovaleva", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "842--866", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00349" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Rogers, Olga Kovaleva, and Anna Rumshisky. 2020. A primer in BERTology: What we know about how BERT works. Transactions of the Association for Computational Linguistics, 8:842-866.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2020. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Energy and policy considerations for deep learning in NLP", |
| "authors": [ |
| { |
| "first": "Emma", |
| "middle": [], |
| "last": "Strubell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ananya", |
| "middle": [], |
| "last": "Ganesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/p19-1355" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emma Strubell, Ananya Ganesh, and Andrew McCal- lum. 2019. Energy and policy considerations for deep learning in NLP. In Proceedings of the 57th", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Annual Meeting of the Association for Computational Linguistics. Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Meeting of the Association for Computational Linguistics. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Patient knowledge distillation for BERT model compression", |
| "authors": [ |
| { |
| "first": "Siqi", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhe", |
| "middle": [], |
| "last": "Gan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingjing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4323--4332", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1441" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siqi Sun, Yu Cheng, Zhe Gan, and Jingjing Liu. 2019. Patient knowledge distillation for BERT model com- pression. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 4323-4332, Hong Kong, China. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "353--355", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5446" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A multi-task benchmark and analysis platform for nat- ural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 353-355, Brussels, Belgium. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "Remi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Davison", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Shleifer", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [ |
| "von" |
| ], |
| "last": "Platen", |
| "suffix": "" |
| }, |
| { |
| "first": "Clara", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Yacine", |
| "middle": [], |
| "last": "Jernite", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Plu", |
| "suffix": "" |
| }, |
| { |
| "first": "Canwen", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Teven", |
| "middle": [ |
| "Le" |
| ], |
| "last": "Scao", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Gugger", |
| "suffix": "" |
| }, |
| { |
| "first": "Mariama", |
| "middle": [], |
| "last": "Drame", |
| "suffix": "" |
| }, |
| { |
| "first": "Quentin", |
| "middle": [], |
| "last": "Lhoest", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-demos.6" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "DeeBERT: Dynamic early exiting for accelerating BERT inference", |
| "authors": [ |
| { |
| "first": "Ji", |
| "middle": [], |
| "last": "Xin", |
| "suffix": "" |
| }, |
| { |
| "first": "Raphael", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaejun", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaoliang", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2246--2251", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.204" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ji Xin, Raphael Tang, Jaejun Lee, Yaoliang Yu, and Jimmy Lin. 2020. DeeBERT: Dynamic early exiting for accelerating BERT inference. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2246-2251, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "BERxiT: Early exiting for BERT with better fine-tuning and extension to regression", |
| "authors": [ |
| { |
| "first": "Ji", |
| "middle": [], |
| "last": "Xin", |
| "suffix": "" |
| }, |
| { |
| "first": "Raphael", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaoliang", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume", |
| "volume": "", |
| "issue": "", |
| "pages": "91--104", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ji Xin, Raphael Tang, Yaoliang Yu, and Jimmy Lin. 2021. BERxiT: Early exiting for BERT with better fine-tuning and extension to regression. In Proceed- ings of the 16th Conference of the European Chap- ter of the Association for Computational Linguistics: Main Volume, pages 91-104, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Bert loses patience: Fast and robust inference with early exit", |
| "authors": [ |
| { |
| "first": "Wangchunshu", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Canwen", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Mcauley", |
| "suffix": "" |
| }, |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "33", |
| "issue": "", |
| "pages": "18330--18341", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wangchunshu Zhou, Canwen Xu, Tao Ge, Julian McAuley, Ke Xu, and Furu Wei. 2020. Bert loses patience: Fast and robust inference with early exit. In Advances in Neural Information Processing Systems, volume 33, pages 18330-18341. Curran Associates, Inc.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "DACT-BERT adds an additional classification layer after each Transformer block, along with a sigmoidal confidence function. DACT-BERT combines the Transformer hidden state and the outputs and confidences of all earlier layers", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Frequency each Transformer block is used.", |
| "num": null |
| } |
| } |
| } |
| } |