File size: 5,879 Bytes
9696ede
 
4c475fd
ea6fa62
4c475fd
 
 
 
 
 
 
303d31c
 
 
 
4c475fd
 
303d31c
4c475fd
303d31c
 
d33990e
 
 
 
 
 
 
 
73b353b
 
 
 
d33990e
 
73b353b
d33990e
73b353b
 
ea6fa62
 
 
 
 
 
0bae1b9
 
ea6fa62
 
038c485
 
 
 
ea6fa62
 
038c485
ea6fa62
038c485
 
a352351
 
 
 
 
 
0b61867
 
a352351
 
0b61867
 
 
 
5905c7d
 
 
 
a352351
 
5905c7d
a352351
5905c7d
 
4c475fd
 
 
 
 
d33990e
 
 
 
ea6fa62
 
 
 
a352351
 
 
 
9696ede
1475882
530a674
 
 
 
783701c
 
 
530a674
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
783701c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
530a674
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
---
license: mit
dataset_info:
- config_name: normal
  features:
  - name: task_id
    dtype: string
  - name: prompt
    dtype: string
  - name: entry_point
    dtype: string
  - name: entry_point_auxiliary
    dtype: string
  - name: test
    dtype: string
  splits:
  - name: test
    num_bytes: 293328
    num_examples: 151
  download_size: 130893
  dataset_size: 293328
- config_name: normal-instruct
  features:
  - name: task_id
    dtype: string
  - name: prompt
    dtype: string
  - name: entry_point
    dtype: string
  - name: entry_point_auxiliary
    dtype: string
  - name: test
    dtype: string
  splits:
  - name: test
    num_bytes: 302780
    num_examples: 151
  download_size: 129722
  dataset_size: 302780
- config_name: with_auxiliary
  features:
  - name: task_id
    dtype: string
  - name: prompt
    dtype: string
  - name: canonical_solution
    dtype: string
  - name: entry_point
    dtype: string
  - name: entry_point_auxiliary
    dtype: string
  - name: test
    dtype: string
  splits:
  - name: test
    num_bytes: 404289
    num_examples: 151
  download_size: 178426
  dataset_size: 404289
- config_name: with_auxiliary-instruct
  features:
  - name: task_id
    dtype: string
  - name: prompt
    dtype: string
  - name: canonical_solution
    dtype: string
  - name: entry_point
    dtype: string
  - name: response_prefix_normal
    dtype: string
  - name: response_prefix_with_auxiliary
    dtype: string
  - name: entry_point_auxiliary
    dtype: string
  - name: test
    dtype: string
  splits:
  - name: test
    num_bytes: 722371
    num_examples: 151
  download_size: 300098
  dataset_size: 722371
configs:
- config_name: normal
  data_files:
  - split: test
    path: normal/test-*
- config_name: normal-instruct
  data_files:
  - split: test
    path: normal-instruct/test-*
- config_name: with_auxiliary
  data_files:
  - split: test
    path: with_auxiliary/test-*
- config_name: with_auxiliary-instruct
  data_files:
  - split: test
    path: with_auxiliary-instruct/test-*
---

Related github repository: https://github.com/sh0416/humanextension

Also, please cite the following papers if you use this evaluation set in your experiments.

* https://aclanthology.org/2024.findings-naacl.181/
* https://aclanthology.org/2024.findings-emnlp.100/
```
@inproceedings{lee-etal-2024-exploring,
    title = "Exploring Language Model's Code Generation Ability with Auxiliary Functions",
    author = "Lee, Seonghyeon  and
      Jang, Sanghwan  and
      Jang, Seongbo  and
      Lee, Dongha  and
      Yu, Hwanjo",
    editor = "Duh, Kevin  and
      Gomez, Helena  and
      Bethard, Steven",
    booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
    month = jun,
    year = "2024",
    address = "Mexico City, Mexico",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-naacl.181/",
    doi = "10.18653/v1/2024.findings-naacl.181",
    pages = "2836--2848",
    abstract = "Auxiliary function is a helpful component to improve language model's code generation ability. However, a systematic exploration of how they affect has yet to be done. In this work, we comprehensively evaluate the ability to utilize auxiliary functions encoded in recent code-pretrained language models. First, we construct a human-crafted evaluation set, called HumanExtension, which contains examples of two functions where one function assists the other. With HumanExtension, we design several experiments to examine their ability in a multifaceted way. Our evaluation processes enable a comprehensive understanding of including auxiliary functions in the prompt in terms of effectiveness and robustness. An additional implementation style analysis captures the models' various implementation patterns when they access the auxiliary function. Through this analysis, we discover the models' promising ability to utilize auxiliary functions including their self-improving behavior by implementing the two functions step-by-step. However, our analysis also reveals the model's underutilized behavior to call the auxiliary function, suggesting the future direction to enhance their implementation by eliciting the auxiliary function call ability encoded in the models. We release our code and dataset to facilitate this research direction."
}

@inproceedings{lee-etal-2024-eliciting,
    title = "Eliciting Instruction-tuned Code Language Models' Capabilities to Utilize Auxiliary Function for Code Generation",
    author = "Lee, Seonghyeon  and
      Kim, Suyeon  and
      Jang, Joonwon  and
      Chon, HeeJae  and
      Lee, Dongha  and
      Yu, Hwanjo",
    editor = "Al-Onaizan, Yaser  and
      Bansal, Mohit  and
      Chen, Yun-Nung",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
    month = nov,
    year = "2024",
    address = "Miami, Florida, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-emnlp.100/",
    doi = "10.18653/v1/2024.findings-emnlp.100",
    pages = "1840--1846",
    abstract = "We study the code generation behavior of instruction-tuned models built on top of code pre-trained language models when they could access an auxiliary function to implement a function. We design several ways to provide auxiliary functions to the models by adding them to the query or providing a response prefix to incorporate the ability to utilize auxiliary functions with the instruction-following capability. Our experimental results show the effectiveness of combining the base models' auxiliary function utilization ability with the instruction following ability. In particular, the performance of adopting our approaches with the open-sourced language models surpasses that of the recent powerful language models, i.e., gpt-4o."
}
```