|
|
--- |
|
|
license: mit |
|
|
dataset_info: |
|
|
- config_name: normal |
|
|
features: |
|
|
- name: task_id |
|
|
dtype: string |
|
|
- name: prompt |
|
|
dtype: string |
|
|
- name: entry_point |
|
|
dtype: string |
|
|
- name: entry_point_auxiliary |
|
|
dtype: string |
|
|
- name: test |
|
|
dtype: string |
|
|
splits: |
|
|
- name: test |
|
|
num_bytes: 293328 |
|
|
num_examples: 151 |
|
|
download_size: 130893 |
|
|
dataset_size: 293328 |
|
|
- config_name: normal-instruct |
|
|
features: |
|
|
- name: task_id |
|
|
dtype: string |
|
|
- name: prompt |
|
|
dtype: string |
|
|
- name: entry_point |
|
|
dtype: string |
|
|
- name: entry_point_auxiliary |
|
|
dtype: string |
|
|
- name: test |
|
|
dtype: string |
|
|
splits: |
|
|
- name: test |
|
|
num_bytes: 302780 |
|
|
num_examples: 151 |
|
|
download_size: 129722 |
|
|
dataset_size: 302780 |
|
|
- config_name: with_auxiliary |
|
|
features: |
|
|
- name: task_id |
|
|
dtype: string |
|
|
- name: prompt |
|
|
dtype: string |
|
|
- name: canonical_solution |
|
|
dtype: string |
|
|
- name: entry_point |
|
|
dtype: string |
|
|
- name: entry_point_auxiliary |
|
|
dtype: string |
|
|
- name: test |
|
|
dtype: string |
|
|
splits: |
|
|
- name: test |
|
|
num_bytes: 404289 |
|
|
num_examples: 151 |
|
|
download_size: 178426 |
|
|
dataset_size: 404289 |
|
|
- config_name: with_auxiliary-instruct |
|
|
features: |
|
|
- name: task_id |
|
|
dtype: string |
|
|
- name: prompt |
|
|
dtype: string |
|
|
- name: canonical_solution |
|
|
dtype: string |
|
|
- name: entry_point |
|
|
dtype: string |
|
|
- name: response_prefix_normal |
|
|
dtype: string |
|
|
- name: response_prefix_with_auxiliary |
|
|
dtype: string |
|
|
- name: entry_point_auxiliary |
|
|
dtype: string |
|
|
- name: test |
|
|
dtype: string |
|
|
splits: |
|
|
- name: test |
|
|
num_bytes: 722371 |
|
|
num_examples: 151 |
|
|
download_size: 300098 |
|
|
dataset_size: 722371 |
|
|
configs: |
|
|
- config_name: normal |
|
|
data_files: |
|
|
- split: test |
|
|
path: normal/test-* |
|
|
- config_name: normal-instruct |
|
|
data_files: |
|
|
- split: test |
|
|
path: normal-instruct/test-* |
|
|
- config_name: with_auxiliary |
|
|
data_files: |
|
|
- split: test |
|
|
path: with_auxiliary/test-* |
|
|
- config_name: with_auxiliary-instruct |
|
|
data_files: |
|
|
- split: test |
|
|
path: with_auxiliary-instruct/test-* |
|
|
--- |
|
|
|
|
|
Related GitHub repository: https://github.com/sh0416/humanextension
|
|
|
|
|
Also, please cite the following papers if you use this evaluation set in your experiments.
|
|
|
|
|
* https://aclanthology.org/2024.findings-naacl.181/ |
|
|
* https://aclanthology.org/2024.findings-emnlp.100/ |
|
|
|
|
``` |
|
|
@inproceedings{lee-etal-2024-exploring, |
|
|
    title = "Exploring Language Model's Code Generation Ability with Auxiliary Functions",
|
|
author = "Lee, Seonghyeon and |
|
|
Jang, Sanghwan and |
|
|
Jang, Seongbo and |
|
|
Lee, Dongha and |
|
|
Yu, Hwanjo", |
|
|
editor = "Duh, Kevin and |
|
|
Gomez, Helena and |
|
|
Bethard, Steven", |
|
|
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024", |
|
|
month = jun, |
|
|
year = "2024", |
|
|
address = "Mexico City, Mexico", |
|
|
publisher = "Association for Computational Linguistics", |
|
|
url = "https://aclanthology.org/2024.findings-naacl.181/", |
|
|
doi = "10.18653/v1/2024.findings-naacl.181", |
|
|
pages = "2836--2848", |
|
|
    abstract = "Auxiliary function is a helpful component to improve language model's code generation ability. However, a systematic exploration of how they affect has yet to be done. In this work, we comprehensively evaluate the ability to utilize auxiliary functions encoded in recent code-pretrained language models. First, we construct a human-crafted evaluation set, called HumanExtension, which contains examples of two functions where one function assists the other. With HumanExtension, we design several experiments to examine their ability in a multifaceted way. Our evaluation processes enable a comprehensive understanding of including auxiliary functions in the prompt in terms of effectiveness and robustness. An additional implementation style analysis captures the models' various implementation patterns when they access the auxiliary function. Through this analysis, we discover the models' promising ability to utilize auxiliary functions including their self-improving behavior by implementing the two functions step-by-step. However, our analysis also reveals the model's underutilized behavior to call the auxiliary function, suggesting the future direction to enhance their implementation by eliciting the auxiliary function call ability encoded in the models. We release our code and dataset to facilitate this research direction."
|
|
} |
|
|
|
|
|
@inproceedings{lee-etal-2024-eliciting, |
|
|
title = "Eliciting Instruction-tuned Code Language Models' Capabilities to Utilize Auxiliary Function for Code Generation", |
|
|
author = "Lee, Seonghyeon and |
|
|
Kim, Suyeon and |
|
|
Jang, Joonwon and |
|
|
Chon, HeeJae and |
|
|
Lee, Dongha and |
|
|
Yu, Hwanjo", |
|
|
editor = "Al-Onaizan, Yaser and |
|
|
Bansal, Mohit and |
|
|
Chen, Yun-Nung", |
|
|
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024", |
|
|
month = nov, |
|
|
year = "2024", |
|
|
address = "Miami, Florida, USA", |
|
|
publisher = "Association for Computational Linguistics", |
|
|
url = "https://aclanthology.org/2024.findings-emnlp.100/", |
|
|
doi = "10.18653/v1/2024.findings-emnlp.100", |
|
|
pages = "1840--1846", |
|
|
abstract = "We study the code generation behavior of instruction-tuned models built on top of code pre-trained language models when they could access an auxiliary function to implement a function. We design several ways to provide auxiliary functions to the models by adding them to the query or providing a response prefix to incorporate the ability to utilize auxiliary functions with the instruction-following capability. Our experimental results show the effectiveness of combining the base models' auxiliary function utilization ability with the instruction following ability. In particular, the performance of adopting our approaches with the open-sourced language models surpasses that of the recent powerful language models, i.e., gpt-4o." |
|
|
} |
|
|
``` |