Datasets:
Tasks:
Text Classification
Formats:
json
Sub-tasks:
natural-language-inference
Languages:
English
Size:
1K - 10K
ArXiv:
License:
\documentclass[varwidth=598pt]{standalone}
% --- Encoding & fonts (pdflatex) ---
\usepackage[T1]{fontenc}     % font encoding: proper hyphenation, copyable PDF text
\usepackage[utf8]{inputenc}  % input encoding (legacy, fine for pdflatex)
\usepackage{microtype}       % better text appearance
% --- Math ---
\usepackage{amsmath}         % math environments
\usepackage{amssymb}         % math symbols
% --- Tables ---
\usepackage{booktabs}        % professional-quality tables (\toprule, \midrule, \bottomrule)
\usepackage{multirow}        % multi-row cells in tables
\usepackage[table]{xcolor}   % enhanced colors for tables; `table' option pulls in colortbl
\usepackage{colortbl}        % \cellcolor, \rowcolor (already loaded via xcolor above; harmless no-op)
\usepackage{array}           % more flexible column formats
\usepackage{tabularx}        % tables with auto-stretch columns
\usepackage{tabu}            % NOTE(review): unmaintained package, unused below -- candidate for removal
\usepackage{makecell}        % line breaks / formatting inside cells
\usepackage{adjustbox}       % downscale over-wide tables
% --- Graphics & text helpers ---
\usepackage{graphicx}        % images in tables or figure/table floats
\usepackage{soul}            % highlighting (colored text/cells)
\usepackage[normalem]{ulem}  % underlining, strikethroughs
\usepackage{textcomp}        % extra text symbols
\usepackage{enumitem}        % compact lists (table notes)
\usepackage{pifont}          % dingbat symbols, http://ctan.org/pkg/pifont
\usepackage{bbding}          % \XSolidBrush
% hyperref is loaded last, after all other packages, per its documented convention
\usepackage{hyperref}
% Suppress the automatic "Table N:" float label: the caption text itself
% already carries its own "Table 2:" prefix.  The original wrapped this in a
% \begingroup that was never closed (an unmatched group triggers a warning at
% \end{document}); since the redefinitions must stay in effect when the table
% is typeset, the group is simply dropped rather than closed.
\makeatletter
\renewcommand{\fnum@table}{}%
\long\def\@makecaption#1#2{%
  \vskip\abovecaptionskip
  \centering #2\par
  \vskip\belowcaptionskip
}
\makeatother
\begin{document}
\begin{table}
  \centering
  % Downscale the 11-column table to the text width if it overflows.
  \begin{adjustbox}{max width=\textwidth}
  \begin{tabular}{l|c|cccccccc|c}
    \toprule
    & \textbf{Memory (GB)} & \textbf{CoLA} & \textbf{STS-B} & \textbf{MRPC} & \textbf{RTE} & \textbf{SST2} & \textbf{MNLI} & \textbf{QNLI} & \textbf{QQP} & \textbf{Avg} \\
    \midrule
    Full Fine-Tuning & 4.64 & 62.24 & 90.92 & 91.30 & 79.42 & 94.57 & 87.18 & 92.33 & 92.28 & 86.28 \\
    \midrule
    GaLore & 4.04 & 60.35 & 90.73 & \textbf{92.25} & \textbf{79.42} & 94.04 & \textbf{87.00} & \textbf{92.24} & 91.06 & 85.89 \\
    LoRA & 2.71 & 61.38 & 90.57 & 91.07 & 78.70 & 92.89 & 86.82 & 92.18 & \textbf{91.29} & 85.61 \\
    VeLoRA & \textbf{2.70} & \textbf{64.56} & \textbf{90.81} & 91.26 & 77.98 & \textbf{94.38} & 86.29 & 92.09 & 89.91 & \textbf{85.91} \\
    \bottomrule
  \end{tabular}
  \end{adjustbox}
  \caption{Table 2: Comparison of our method with full fine-tuning, GaLore and LoRA on the GLUE benchmark using pre-trained RoBERTa-Base. Our method reaches the best overall results while showing significant memory improvements, especially compared to GaLore. We bold the best results from the considered PEFT methods. The GPU memory is measured on-device.}
\end{table}
\end{document}