Add Batch 0d68f3e4-4646-4d1a-a1f0-831b958536d0
This view is limited to 50 files because it contains too many changes. See raw diff.
- abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/9fda41e2-544c-4f91-8ab6-462a4a922809_content_list.json +3 -0
- abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/9fda41e2-544c-4f91-8ab6-462a4a922809_model.json +3 -0
- abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/9fda41e2-544c-4f91-8ab6-462a4a922809_origin.pdf +3 -0
- abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/full.md +0 -0
- abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/images.zip +3 -0
- abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/layout.json +3 -0
- acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/b7e96783-9a06-4c6f-968b-4f55e8bde611_content_list.json +3 -0
- acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/b7e96783-9a06-4c6f-968b-4f55e8bde611_model.json +3 -0
- acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/b7e96783-9a06-4c6f-968b-4f55e8bde611_origin.pdf +3 -0
- acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/full.md +0 -0
- acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/images.zip +3 -0
- acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/layout.json +3 -0
- adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/8337521b-fdf8-4327-bda9-64a1530644e8_content_list.json +3 -0
- adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/8337521b-fdf8-4327-bda9-64a1530644e8_model.json +3 -0
- adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/8337521b-fdf8-4327-bda9-64a1530644e8_origin.pdf +3 -0
- adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/full.md +458 -0
- adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/images.zip +3 -0
- adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/layout.json +3 -0
- adaptiverationalactivationstoboostdeepreinforcementlearning/823c34dc-c426-4eb3-828d-ee51078c5e70_content_list.json +3 -0
- adaptiverationalactivationstoboostdeepreinforcementlearning/823c34dc-c426-4eb3-828d-ee51078c5e70_model.json +3 -0
- adaptiverationalactivationstoboostdeepreinforcementlearning/823c34dc-c426-4eb3-828d-ee51078c5e70_origin.pdf +3 -0
- adaptiverationalactivationstoboostdeepreinforcementlearning/full.md +457 -0
- adaptiverationalactivationstoboostdeepreinforcementlearning/images.zip +3 -0
- adaptiverationalactivationstoboostdeepreinforcementlearning/layout.json +3 -0
- addressingsignaldelayindeepreinforcementlearning/30c95151-f2de-4045-ab49-75e38047ad97_content_list.json +3 -0
- addressingsignaldelayindeepreinforcementlearning/30c95151-f2de-4045-ab49-75e38047ad97_model.json +3 -0
- addressingsignaldelayindeepreinforcementlearning/30c95151-f2de-4045-ab49-75e38047ad97_origin.pdf +3 -0
- addressingsignaldelayindeepreinforcementlearning/full.md +665 -0
- addressingsignaldelayindeepreinforcementlearning/images.zip +3 -0
- addressingsignaldelayindeepreinforcementlearning/layout.json +3 -0
- adversarialautomixup/51bc7332-976c-4305-8ab7-b6e024fbaeec_content_list.json +3 -0
- adversarialautomixup/51bc7332-976c-4305-8ab7-b6e024fbaeec_model.json +3 -0
- adversarialautomixup/51bc7332-976c-4305-8ab7-b6e024fbaeec_origin.pdf +3 -0
- adversarialautomixup/full.md +502 -0
- adversarialautomixup/images.zip +3 -0
- adversarialautomixup/layout.json +3 -0
- ageneralframeworkforuserguidedbayesianoptimization/168ed08a-b567-41d2-abd2-5e82251d79b2_content_list.json +3 -0
- ageneralframeworkforuserguidedbayesianoptimization/168ed08a-b567-41d2-abd2-5e82251d79b2_model.json +3 -0
- ageneralframeworkforuserguidedbayesianoptimization/168ed08a-b567-41d2-abd2-5e82251d79b2_origin.pdf +3 -0
- ageneralframeworkforuserguidedbayesianoptimization/full.md +413 -0
- ageneralframeworkforuserguidedbayesianoptimization/images.zip +3 -0
- ageneralframeworkforuserguidedbayesianoptimization/layout.json +3 -0
- ahierarchicalbayesianmodelforfewshotmetalearning/835d9bdc-b1d0-4584-861e-7d0b76aaea95_content_list.json +3 -0
- ahierarchicalbayesianmodelforfewshotmetalearning/835d9bdc-b1d0-4584-861e-7d0b76aaea95_model.json +3 -0
- ahierarchicalbayesianmodelforfewshotmetalearning/835d9bdc-b1d0-4584-861e-7d0b76aaea95_origin.pdf +3 -0
- ahierarchicalbayesianmodelforfewshotmetalearning/full.md +0 -0
- ahierarchicalbayesianmodelforfewshotmetalearning/images.zip +3 -0
- ahierarchicalbayesianmodelforfewshotmetalearning/layout.json +3 -0
- alightweightmethodfortacklingunknownparticipationstatisticsinfederatedaveraging/95f5339c-dfa7-49a6-b487-698cb4f07243_content_list.json +3 -0
- alightweightmethodfortacklingunknownparticipationstatisticsinfederatedaveraging/95f5339c-dfa7-49a6-b487-698cb4f07243_model.json +3 -0
abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/9fda41e2-544c-4f91-8ab6-462a4a922809_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0422db575325b545eb7b4285dc3a7e2e123436829eafb9da108c7ab282ffcaf
+size 180584
abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/9fda41e2-544c-4f91-8ab6-462a4a922809_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:041ee4de4f3b21867fa39eed907ace484c5e148768ce78c2393b72d09e8e20fe
+size 214105
abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/9fda41e2-544c-4f91-8ab6-462a4a922809_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9e0c25e66e1b44290d7120889ca614c836571f7d3ea62234d77e6c56bba727f
+size 436182
abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/full.md
ADDED
The diff for this file is too large to render. See raw diff.
abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18e24cd51d0ca77ff9f14b0c377007e5b63dccfbd3b432565286e0c6d893994f
+size 1067044
abenchmarkforlearningtotranslateanewlanguagefromonegrammarbook/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92f7f7f75fa5cff9899e2f132659fc48ac201b96a71bfbca780988d42f708b03
+size 701974
acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/b7e96783-9a06-4c6f-968b-4f55e8bde611_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3abe9fe087bab09e6812b1a85fda30ed767ec2aaf5e16067ef07a29ad263a20c
+size 248461
acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/b7e96783-9a06-4c6f-968b-4f55e8bde611_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e849b4f0e9ffce0dd04c7bf17f81dc639556dff7acc17e461a8b324d5fe9adf
+size 297217
acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/b7e96783-9a06-4c6f-968b-4f55e8bde611_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71037aae5d261101d1a936143d310e3b286d9595ee6effd845a78d701176db5d
+size 1250391
acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/full.md
ADDED
The diff for this file is too large to render. See raw diff.
acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbc89094f9912f308149b4d094a9e6fea58505026eb7d113b844cea9894f05bc
+size 4091361
acceleratingdatagenerationforneuraloperatorsviakrylovsubspacerecycling/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c084ca6374c2663ef0baf43052c57c47982bcdde9fddeca9477c6c5b0b8131b
+size 1076756
adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/8337521b-fdf8-4327-bda9-64a1530644e8_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4705c3af0eb48aee666d7575cd86e1d1f2038aaf3294d242d8d4a46f147a8223
+size 136902
adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/8337521b-fdf8-4327-bda9-64a1530644e8_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12097a1ee81a4f73f2d7f81efb49d25514ebb8362498ba2987872cb9db379738
+size 166156
adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/8337521b-fdf8-4327-bda9-64a1530644e8_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:066698beaf9b2a7ebee39d8c26185d1f5f90da9e853a49789cbcc7f51aae1308
+size 675729
adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/full.md
ADDED
@@ -0,0 +1,458 @@
# Adaptive Chameleon or Stubborn Sloth: Revealing the Behavior of Large Language Models in Knowledge Conflicts

Jian Xie♠\* Kai Zhang♣\* Jiangjie Chen♠ Renze Lou♢ Yu Su♣

♠ School of Computer Science, Fudan University
♣ The Ohio State University ♢ The Pennsylvania State University

jianxie22@m.fudan.edu.cn, {zhang.13253, su.809}@osu.edu

# ABSTRACT

By providing external information to large language models (LLMs), tool augmentation (including retrieval augmentation) has emerged as a promising solution for addressing the limitations of LLMs' static parametric memory. However, how receptive are LLMs to such external evidence, especially when the evidence conflicts with their parametric memory? We present the first comprehensive and controlled investigation into the behavior of LLMs when encountering knowledge conflicts. We propose a systematic framework to elicit high-quality parametric memory from LLMs and construct the corresponding counter-memory, which enables us to conduct a series of controlled experiments. Our investigation reveals seemingly contradictory behaviors of LLMs. On the one hand, different from prior wisdom, we find that LLMs can be highly receptive to external evidence even when it conflicts with their parametric memory, given that the external evidence is coherent and convincing. On the other hand, LLMs also demonstrate a strong confirmation bias when the external evidence contains some information that is consistent with their parametric memory, despite being presented with conflicting evidence at the same time. These results have important implications that merit careful consideration for the further development and deployment of tool- and retrieval-augmented LLMs. Resources are available at https://github.com/OSU-NLP-Group/LLM-Knowledge-Conflict.

# 1 INTRODUCTION

After pre-training on massive corpora, large language models (LLMs) (Brown et al., 2020; Chowdhery et al., 2022; Ouyang et al., 2022; OpenAI, 2022; 2023; Zeng et al., 2023; Touvron et al., 2023a) have formed a wealth of parametric memory, such as commonsense and factual knowledge (Petroni et al., 2019; Li et al., 2022; Zhao et al., 2023). However, such parametric memory may be inaccurate or become outdated (Liska et al., 2022; Luu et al., 2022) due to misinformation in the pre-training corpus or the static nature of parametric memory, which is known to be a major cause of hallucinations (Elazar et al., 2021; Shuster et al., 2021; Ji et al., 2023).

Tool<sup>1</sup> (Schick et al., 2023; Qin et al., 2023) or retrieval augmentation (Mallen et al., 2022; Shi et al., 2023b; Ram et al., 2023) has emerged as a promising solution by providing external information as new evidence to LLMs, as in ChatGPT Plugins and New Bing. However, external evidence could inevitably conflict with LLMs' parametric memory. We refer to external evidence that conflicts with parametric memory as counter-memory. In this paper, we seek to answer the question: how receptive are LLMs to external evidence, especially counter-memory? A solid understanding of this question is an essential stepping stone for wider application of tool-augmented LLMs. Not only does this relate to overcoming the limitations of LLMs' static parametric memory, but it is also associated with direct safety concerns. For example, what if a third-party tool, whether by the developer's design or after being hijacked by attackers, intentionally returns disinformation? Will LLMs be deceived?

We present the first comprehensive and controlled investigation into the behavior of LLMs when encountering counter-memory. A key challenge lies in how to construct the counter-memory. Prior work employs various heuristics, such as negation injection (Niu & Bansal, 2018; Kassner et al., 2021; Gubelmann & Handschuh, 2022) and entity substitution (Longpre et al., 2021; Zhou et al., 2023), and finds that language models (both large and small) tend to be stubborn and cling to their parametric memory. However, such heuristic word-level editing results in incoherent counter-memory (see an example in Section 4.1), which may make it trivial for LLMs to detect and thus neglect the constructed counter-memory. It is unclear how the prior conclusions translate to real-world scenarios, where counter-memory is more coherent and convincing.

We propose a systematic framework to elicit the parametric memory of LLMs and construct the corresponding counter-memory. We design a series of checks, such as entailment from parametric memory to the answer, to ensure that the elicited parametric memory is indeed the LLM's internal belief. For the counter-memory, instead of heuristically editing the parametric memory, we instruct an LLM to directly generate a coherent passage that factually conflicts with the parametric memory. After obtaining a large pool of parametric memory and counter-memory pairs, we then examine LLMs' behavior in different knowledge conflict scenarios, including 1) when only counter-memory is present as external evidence and 2) when both parametric memory and counter-memory are present.

Our investigation leads to a series of interesting new findings. We highlight the following:

- LLMs are highly receptive to external evidence if it is the only evidence, even when it conflicts with their parametric memory. This contradicts the prior wisdom (Longpre et al., 2021), and we attribute it to the more coherent and convincing counter-memory constructed through our framework. On the other hand, this also suggests that LLMs may be easily deceived by, e.g., disinformation from malicious (third-party) tools.
- However, with both supportive and contradictory evidence to their parametric memory, LLMs show a strong confirmation bias (Nickerson, 1998) and tend to cling to their parametric memory. This reveals a potential challenge for LLMs in unbiasedly orchestrating multiple pieces of conflicting evidence, a situation commonly encountered by generative search engines.

# 2 RELATED WORK

**Parametric Memory in Language Models** After pre-training, language models have internalized a vast amount of knowledge into their parameters (Roberts et al., 2020; Jiang et al., 2020), also known as parametric memory. Many past studies have explored the elicitation of parametric memory in language models, such as commonsense or factual knowledge probing (Petroni et al., 2019; Lin et al., 2020; Zhang et al., 2021; West et al., 2022; Chen et al., 2023; Wang et al., 2023). Such parametric memory can help solve downstream tasks (Wang et al., 2021; Yu et al., 2023; Sun et al., 2023). However, previous work has discovered that language models memorize only a small portion of the knowledge they have been exposed to during pre-training (Carlini et al., 2021; 2023) due to the model's limited memorization abilities. In addition, parametric memory may become outdated (Lazaridou et al., 2021; De Cao et al., 2021). Such incorrect and outdated parametric memory may manifest as hallucinations (Elazar et al., 2021; Shuster et al., 2021; Ji et al., 2023). Although some methods have been proposed to edit knowledge in language models (Dai et al., 2022; Meng et al., 2022; 2023), they typically require additional modifications to model weights without evaluating the consequences for other aspects of the model, such as performance, and are limited to factual knowledge.

**Tool-augmented Language Models** To address the limitations of parametric memory, external tools such as retrievers are used to augment language models with up-to-date information; these are known as tool-augmented (Nakano et al., 2021; Yao et al., 2023; Qin et al., 2023; Schick et al., 2023; Lu et al., 2023) or retrieval-augmented (Guu et al., 2020; Khandelwal et al., 2020; Izacard & Grave, 2021; Borgeaud et al., 2022; Zhong et al., 2022) language models. Such a framework, which has proven its efficacy in enhancing large language models (Shi et al., 2023b; Ram et al., 2023; Mallen et al., 2022), is adopted in real-world applications such as New Bing and ChatGPT Plugins. Inevitably, the external evidence can conflict with the parametric memory. However, the behavior of LLMs in knowledge conflict scenarios remains under-explored, and unraveling it holds significance for wider applications of tool-augmented LLMs.

Figure 1: Our framework for simulating knowledge conflicts. In Step 1, we elicit LLMs' memory answers and parametric memory in a closed-book QA fashion. In Step 2, we construct a counter-answer to the memory answer with heuristic rules, for which ChatGPT generates supporting counter-memory under instructions. To uphold evidence quality, we conduct entailment checking (Step 3) and answer-consistency checking (Step 4) to filter out unqualified examples. All experiments are implemented in a zero-shot setting to avoid the bias introduced by demonstrations.

**Knowledge Conflict** To perform controlled experiments, knowledge conflict is often simulated with counter-memory constructed upon parametric memory. Heuristic counter-memory construction methods such as negation injection (Niu & Bansal, 2018; Kassner et al., 2021; Petroni et al., 2020; Pan et al., 2021) have been developed. Furthermore, entity substitution (Longpre et al., 2021; Chen et al., 2022; Si et al., 2023; Zhou et al., 2023) replaces all mentions of the answer entity in parametric memory with other entities to construct counter-memory. However, these methods are limited to word-level editing, leading to low overall coherence in the counter-memory. We instead instruct LLMs to generate counter-memory from scratch to ensure high coherence.

# 3 EXPERIMENTAL SETUP

In this section, we describe our framework for eliciting high-quality parametric memory from LLMs and constructing the corresponding counter-memory, as well as the evaluation metrics.

# 3.1 DATASETS

Following prior work (Longpre et al., 2021; Chen et al., 2022), we adopt the question answering (QA) task as the testbed for knowledge conflict experiments. In addition to an entity-based QA dataset (POPQA), we include a multi-step reasoning dataset (STRATEGYQA) to diversify the questions studied in the experiments. Specifically,

- POPQA (Mallen et al., 2022) is an entity-centric QA dataset that contains 14K questions. The data originate from triples in Wikidata. Using templates tailored to relationship types, the authors construct questions by substituting the subject of each knowledge triple. POPQA defines the popularity of a question by the monthly Wikipedia page views of the entity mentioned in the question.
- STRATEGYQA (Geva et al., 2021) is a multi-step fact reasoning benchmark that necessitates implicit decomposition of questions into reasoning steps. The questions are built around Wikipedia terms and cover a wide range of strategies, demanding the model's capability to select and integrate relevant knowledge effectively. The language model is expected to provide a True or False answer.

Table 1: The correctness of LLMs' responses in the closed-book QA setting (Step 1 in Figure 1). We examine eight LLMs: three closed-source and five open-source.

<table><tr><td rowspan="2">Models</td><td colspan="3">POPQA</td><td colspan="3">STRATEGYQA</td></tr><tr><td>Correct</td><td>Wrong</td><td>Unknown</td><td>Correct</td><td>Wrong</td><td>Unknown</td></tr><tr><td colspan="7">Closed-source LLMs</td></tr><tr><td>ChatGPT (OpenAI, 2022)</td><td>44.6</td><td>44.4</td><td>11.0</td><td>67.4</td><td>30.7</td><td>1.9</td></tr><tr><td>GPT-4 (OpenAI, 2023)</td><td>50.8</td><td>48.7</td><td>0.5</td><td>77.3</td><td>22.7</td><td>0.0</td></tr><tr><td>PaLM2 (Anil et al., 2023)</td><td>32.9</td><td>67.1</td><td>0.0</td><td>67.9</td><td>32.1</td><td>0.0</td></tr><tr><td colspan="7">Open-source LLMs</td></tr><tr><td>Qwen-7B (Alibaba, 2023)</td><td>24.9</td><td>62.6</td><td>5.1</td><td>56.8</td><td>43.2</td><td>0.0</td></tr><tr><td>Llama2-7B (Touvron et al., 2023b)</td><td>24.1</td><td>75.9</td><td>0.0</td><td>56.7</td><td>43.3</td><td>0.0</td></tr><tr><td>Llama2-70B (Touvron et al., 2023b)</td><td>43.0</td><td>57.0</td><td>0.0</td><td>64.4</td><td>35.7</td><td>0.0</td></tr><tr><td>Vicuna-7B (Zheng et al., 2023)</td><td>23.8</td><td>69.3</td><td>6.9</td><td>55.0</td><td>45.0</td><td>0.0</td></tr><tr><td>Vicuna-33B (Zheng et al., 2023)</td><td>28.6</td><td>71.4</td><td>0.0</td><td>65.0</td><td>35.0</td><td>0.0</td></tr></table>

# 3.2 PARAMETRIC MEMORY ELICITATION

Step 1 in Figure 1 illustrates how we elicit parametric memory: in a closed-book QA fashion, LLMs recall their parametric memory to answer questions without any external evidence. Specifically, given a question, e.g., "Who is the chief scientist of Google DeepMind?", LLMs are instructed to provide an answer, "Demis Hassabis", along with supporting background information detailing how Demis founded and led DeepMind. We cast the detailed background as parametric memory because the answer alone only represents the conclusion of the parametric memory w.r.t. the given question.
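To make Step 1 concrete, here is a minimal sketch of the elicitation loop; the prompt wording and the `query_llm` helper are hypothetical stand-ins, not the paper's actual code (its exact prompts are listed in Appendix C):

```python
# Hypothetical sketch of Step 1 (closed-book elicitation). `query_llm` stands
# in for an API call to the LLM under study.
ELICIT_PROMPT = (
    "Answer the question based only on your own knowledge, then give detailed "
    "background information supporting your answer.\n"
    "Question: {question}\nAnswer:"
)

def elicit_parametric_memory(query_llm, question: str) -> dict:
    """Return the model's memory answer and its supporting parametric memory."""
    response = query_llm(ELICIT_PROMPT.format(question=question))
    # Assumes the answer appears on the first line, background after it.
    answer, _, memory = response.partition("\n")
    return {"question": question,
            "memory_answer": answer.strip(),
            "parametric_memory": memory.strip()}
```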
Table 1 shows the closed-book results of LLMs on POPQA and STRATEGYQA. Notably, LLMs may respond with "Unknown" when no evidence is provided in the context; this is particularly common for ChatGPT. Such answer abstention (Rajpurkar et al., 2018) suggests that the LLM fails to recall valid memory associated with the given question, so we discard these examples. For comprehensiveness, we keep the examples that LLMs answer incorrectly in the closed-book paradigm, because the wrong answer and its associated memory are also stored in the model's parameters.

# 3.3 COUNTER-MEMORY CONSTRUCTION

As depicted in Figure 1, at Step 2 we reframe the memory answer "Demis Hassabis" into a counter-answer (e.g., "Jeff Dean"). Concretely, for POPQA we substitute the entity in the memory answer with a same-type entity (e.g., from Demis to Jeff), while for STRATEGYQA we flip the memory answer (e.g., from a positive statement to a negative one). With the counter-answer "Jeff Dean", we instruct ChatGPT<sup>2</sup> to make up supporting evidence that Jeff Dean serves as chief scientist of DeepMind. We term such evidence that conflicts with parametric memory counter-memory.

Since the counter-memory is generated from scratch by a powerful generative LLM, it is more coherent than the output of previous word-level editing methods (Longpre et al., 2021; Chen et al., 2022) performed on parametric memory. Both the generated parametric memory and the counter-memory can serve as external evidence for later experiments on LLMs in knowledge conflicts. Please refer to Appendix B.1 for more details on evidence construction for each dataset.
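To make Step 2 concrete, here is a minimal sketch of the counter-memory generation call; the instruction wording is an assumption, and `query_llm` is again a hypothetical stand-in for a ChatGPT API call:

```python
# Hypothetical sketch of Step 2 (counter-memory generation). The prompt text
# is an assumption; the paper's actual prompts are in its Appendix C.
COUNTER_PROMPT = (
    "Write a coherent and convincing piece of evidence supporting the "
    "following answer to the question, as if it were factually correct.\n"
    "Question: {question}\nAnswer: {counter_answer}\nEvidence:"
)

def generate_counter_memory(query_llm, question: str, counter_answer: str) -> str:
    """Ask the generator LLM (ChatGPT in the paper) for evidence backing the counter-answer."""
    return query_llm(COUNTER_PROMPT.format(
        question=question, counter_answer=counter_answer)).strip()
```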
# 3.4 ANSWER-EVIDENCE ENTAILMENT CHECKING

An ideal piece of evidence should strongly support its answer. For instance, the parametric memory about Demis and DeepMind should clearly support the corresponding memory answer that Demis is the chief scientist of DeepMind. Similarly, counter-memory should clearly support the corresponding counter-answer. Therefore, for Step 3 shown in Figure 1, we utilize a natural language inference (NLI) model for support checking to ensure the evidence indeed entails the answer. Specifically, we use the state-of-the-art NLI model DeBERTa-V2 (He et al., 2021)<sup>3</sup> to determine whether both the parametric memory and the counter-memory support their corresponding answers. We only keep the examples where both answers are supported for subsequent experiments.

To ensure the reliability of the selected NLI model, we manually evaluated 200 random examples and observed $99\%$ accuracy. Please refer to Appendix B.5 for more details.
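A minimal sketch of this support check is shown below; the `microsoft/deberta-v2-xxlarge-mnli` checkpoint is an assumption for which exact NLI model was used:

```python
# Sketch of Step 3: keep an (evidence, answer) pair only if the NLI model
# predicts entailment. The checkpoint name is an assumption.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL = "microsoft/deberta-v2-xxlarge-mnli"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

def entails(evidence: str, answer_statement: str) -> bool:
    """True if `evidence` (premise) entails `answer_statement` (hypothesis)."""
    inputs = tokenizer(evidence, answer_statement,
                       return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    # Read the label name from the config rather than hardcoding an index.
    label = model.config.id2label[int(logits.argmax(dim=-1))]
    return label.upper() == "ENTAILMENT"
```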
Table 2: Examples in the final dataset. Both parametric memory and counter-memory are generated by ChatGPT. They are coherent and present an illusion of correctness even when factually incorrect.

<table><tr><td></td><td>Correct Memory</td><td>Wrong Memory</td></tr><tr><td>Question</td><td>What is the capital of Kingdom of France?</td><td>Who was the producer of The Cardinal?</td></tr><tr><td>Ground Truth</td><td>Paris</td><td>Otto Preminger</td></tr><tr><td>Parametric Memory</td><td>Paris was chosen as the capital of the Kingdom of France due to its central location and strategic importance. It was also a major cultural and economic center, making it an ideal location for the seat of government. Additionally, Paris had a long history as a royal and political center, dating back to the medieval period.</td><td>The Cardinal was produced by Martin Ransohoff, who was a prominent film and television producer in the mid-20th century. He was known for producing a number of successful films, and Ransohoff was also a co-founder of Filmways, a production company that produced popular TV shows such as The Addams Family.</td></tr><tr><td>Counter-memory</td><td>Néma is the capital of the Kingdom of France. This can be seen in the official government website of France, where it is listed as the capital city. Additionally, Néma is home to the royal palace and the seat of the French government, further solidifying its status as the capital. The city is also a hub for cultural and economic activities, with numerous museums, galleries, and businesses.</td><td>Otto Preminger was a prominent film producer in the mid-20th century, known for his work on a number of successful films. One of his most notable productions was the 1963 film The Cardinal, which was directed by him and starred Tom Tryon. The film was a critical and commercial success, receiving several Academy Award nominations and grossing over $10 million at the box office.</td></tr></table>

# 3.5 MEMORY ANSWER CONSISTENCY

We adopt another check (Step 4 of Figure 1) to further ensure data quality. If the parametric memory we elicit is truly an LLM's internal belief, presenting it explicitly as evidence should lead the LLM to provide the same answer as in the closed-book setting (Step 1). Therefore, in the evidence-based QA format, we use the parametric memory as the sole evidence and instruct LLMs to answer the same question again. For example, given the parametric memory about Demis and DeepMind, the LLM should respond consistently with its previous memory answer, i.e., that Demis is the chief scientist of DeepMind.
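Here is a minimal sketch of this consistency filter, under the assumption of a simple exact-match comparison; the prompt wording and `query_llm` are hypothetical stand-ins:

```python
# Sketch of Step 4: keep an example only when the evidence-based answer
# matches the closed-book memory answer. Exact-match comparison is an
# assumption; a softer match may be needed in practice.
EVIDENCE_QA_PROMPT = (
    "According to the given information, answer the question.\n"
    "Information: {evidence}\nQuestion: {question}\nAnswer:"
)

def is_consistent(query_llm, example: dict) -> bool:
    prompt = EVIDENCE_QA_PROMPT.format(
        evidence=example["parametric_memory"], question=example["question"])
    new_answer = query_llm(prompt).strip()
    return new_answer == example["memory_answer"]

# filtered = [ex for ex in examples if is_consistent(query_llm, ex)]
```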
However, the answer inconsistency results in Table 3 show that LLMs may still change their answers when the parametric memory obtained in Step 1 is explicitly presented as evidence. This suggests that the LLM's internal belief in this parametric memory may not be firm (e.g., there may be competing answers that are equally plausible to the LLM). We filter out such examples to ensure the remaining ones capture an LLM's firm parametric memory.

Table 3: Answer inconsistency rate between closed-book results (Step 1) and evidence-based QA with parametric memory (Step 4).

<table><tr><td></td><td>POPQA</td><td>STRATEGYQA</td></tr><tr><td>ChatGPT</td><td>4.7%</td><td>3.7%</td></tr><tr><td>GPT-4</td><td>3.9%</td><td>2.6%</td></tr><tr><td>PaLM2</td><td>8.4%</td><td>2.7%</td></tr><tr><td>Qwen-7B</td><td>5.4%</td><td>5.6%</td></tr><tr><td>Llama2-7B</td><td>4.7%</td><td>7.3%</td></tr><tr><td>Llama2-70B</td><td>2.3%</td><td>0.7%</td></tr><tr><td>Vicuna-7B</td><td>12.4%</td><td>6.9%</td></tr><tr><td>Vicuna-33B</td><td>16.6%</td><td>5.3%</td></tr></table>

Table 4: Number of final examples for each LLM. The differences between LLMs are due to their different outputs going through the framework.

<table><tr><td></td><td>POPQA(#)</td><td>STRATEGYQA(#)</td></tr><tr><td>ChatGPT</td><td>7,947</td><td>1,245</td></tr><tr><td>GPT-4</td><td>9,544</td><td>1,356</td></tr><tr><td>PaLM2</td><td>5,256</td><td>500</td></tr><tr><td>Qwen-7B</td><td>7,204</td><td>671</td></tr><tr><td>Llama2-7B</td><td>8,027</td><td>698</td></tr><tr><td>Llama2-70B</td><td>9,314</td><td>822</td></tr><tr><td>Vicuna-7B</td><td>4,170</td><td>559</td></tr><tr><td>Vicuna-33B</td><td>3,787</td><td>775</td></tr></table>

After undergoing the entailment and answer-consistency checks, the remaining examples are likely to represent firm parametric memory and high-quality counter-memory, which lays a solid foundation for the subsequent knowledge conflict experiments. Examples from the final POPQA data are shown in Table 2, and statistics of the final datasets are shown in Table 4. Please refer to Appendix B.2 for more details on Steps 3 and 4 and further examples.

# 3.6 EVALUATION METRICS

A single generation from an LLM could contain both the memory answer and the counter-answer, which makes it challenging to automatically determine the exact answer an LLM commits to. To address this issue, we transform the free-form QA into a multiple-choice QA format by providing a few options as possible answers. This limits the generation space and helps determine the answer provided by LLMs with certainty. Specifically, for each question from both datasets, LLMs are instructed to select one answer from the memory answer (Mem-Ans.), the counter-answer (Ctr-Ans.), and "Uncertain"; a sketch of this reformulation follows.
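The sketch below illustrates one way to build such a multiple-choice prompt; the option wording and layout are assumptions, not the paper's exact prompt:

```python
# Sketch of the multiple-choice reformulation used for evaluation.
import random

def build_mc_prompt(question: str, evidence: list[str],
                    memory_answer: str, counter_answer: str) -> str:
    options = [memory_answer, counter_answer]
    random.shuffle(options)  # avoid positional bias between the two answers
    options.append("Uncertain")
    evidence_block = "\n".join(
        f"Evidence {i + 1}: {e}" for i, e in enumerate(evidence))
    option_block = "\n".join(
        f"{chr(65 + i)}. {o}" for i, o in enumerate(options))
    return (f"{evidence_block}\nQuestion: {question}\n"
            f"Choose one option:\n{option_block}\nAnswer:")
```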
Additionally, to quantify how often LLMs stick to their parametric memory, we adopt the memorization ratio metric (Longpre et al., 2021; Chen et al., 2022):

$$
M_R = \frac{f_m}{f_m + f_c}, \tag{1}
$$

where $f_m$ is the frequency of the memory answer and $f_c$ is that of the counter-answer. Higher memorization ratios signify that LLMs rely more on their parametric memory, while lower ratios indicate more frequent adoption of the counter-memory.
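For concreteness, Eq. (1) amounts to the following computation over per-example choices:

```python
# Computing the memorization ratio M_R of Eq. (1) from per-example choices.
from collections import Counter

def memorization_ratio(choices: list[str]) -> float:
    """`choices` holds one of "Mem-Ans.", "Ctr-Ans.", "Uncertain" per example;
    "Uncertain" responses do not enter Eq. (1)."""
    counts = Counter(choices)
    f_m, f_c = counts["Mem-Ans."], counts["Ctr-Ans."]
    return f_m / (f_m + f_c)

print(memorization_ratio(["Mem-Ans.", "Ctr-Ans.", "Mem-Ans.", "Uncertain"]))  # ~0.667
```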
# 4 EXPERIMENTS

# 4.1 SINGLE-SOURCE EVIDENCE

We first experiment with LLMs in the single-source evidence setting, where counter-memory is the sole evidence presented to LLMs. Such knowledge conflicts happen when LLMs are augmented with tools that return a single piece of external evidence, such as a Wikipedia API (Yao et al., 2023). For counter-memory construction, we apply 1) the entity-substitution method, a widely used strategy in previous work, and 2) our generation-based method.

**LLMs are stubborn when encountering entity substitution-based counter-memory.** Following previous work (Longpre et al., 2021; Chen et al., 2022), we substitute the exactly matched ground-truth entity mentions in the parametric memory with a random entity of the same type. The counter-memory is then used as the sole evidence for LLMs to answer the question; a minimal sketch of this heuristic follows the example below:

Evidence: ~~Washington D.C.~~ London, USA's capital, has the Washington Monument.
Question: What is the capital city of USA?
Answer by ChatGPT: Washington D.C.
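A minimal sketch of this substitution heuristic (the baseline from prior work, not our generation-based method):

```python
# Sketch of the entity-substitution baseline: replace exact mentions of the
# ground-truth entity in the parametric memory with a random same-type entity.
import random

def substitute_entity(parametric_memory: str, ground_truth: str,
                      same_type_entities: list[str]) -> tuple[str, str]:
    """Return (counter_memory, counter_answer) via word-level editing."""
    counter_answer = random.choice(
        [e for e in same_type_entities if e != ground_truth])
    counter_memory = parametric_memory.replace(ground_truth, counter_answer)
    return counter_memory, counter_answer
```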
Figure 2 shows the results of this approach on the POPQA dataset. Although the instruction clearly guides LLMs to answer questions based on the given counter-memory, LLMs still stick to their parametric memory, especially the three closed-source LLMs (ChatGPT, GPT-4, and PaLM2). This observation is aligned with previous work (Longpre et al., 2021). The reason may stem from the incoherence of evidence built by substitution: in the example above, although "Washington D.C." is successfully substituted by "London", the context mentioning the Washington Monument and the USA still correlates highly with the original entity, impeding LLMs from generating London as the answer. Furthermore, when comparing Llama2-7B and Vicuna-7B to their larger counterparts in the same series (i.e., Llama2-70B and Vicuna-33B), we observe that the larger LLMs are more inclined to insist on their parametric memory. We suppose that larger LLMs, with their stronger memorization and reasoning capabilities, are more sensitive to incoherent sentences.

**LLMs are highly receptive to generated coherent counter-memory.** To alleviate the incoherence issue of the above counter-memory, we instruct LLMs to directly generate coherent counter-memory following the aforementioned steps (Figure 1). Figure 2 shows the experimental results with generation-based counter-memory, from which we make the following observations:

First, LLMs are actually highly receptive to external evidence if it is presented in a coherent way, even though it conflicts with their parametric memory. This contradicts the prior conclusion (Longpre et al., 2021) and the observation with entity-substitution counter-memory shown in Figure 2. Such high receptiveness in turn shows that the counter-memory constructed through our framework is indeed more coherent and convincing. We manually check 50 stubborn (i.e., "Mem-Ans.") cases and find that most of them are due to hard-to-override commonsense or a lack of strong direct conflicts. Detailed analyses can be found in Appendix B.3.

Figure 2: Answer distributions with entity substitution-based (Subs.) and generation-based (Gen.) counter-memory as the single evidence. Mem-Ans. and Ctr-Ans. refer to the memory answer and counter-answer, respectively. Panels (a)-(h) show ChatGPT, GPT-4, PaLM2, Qwen-7B, Llama2-7B, Llama2-70B, Vicuna-7B, and Vicuna-33B.

Figure 3: Memorization ratio of LLMs answering questions from different popularity categories. A higher memorization ratio indicates that LLMs rely more on their parametric memory and generate the memory answer. We choose four widely used LLMs; panels (a)-(d) show ChatGPT, GPT-4, PaLM2, and Llama2-7B.
Second, many of the generated counter-memory passages are disinformation that misleads LLMs to the wrong answer. Concerningly, LLMs appear to be susceptible to, and can be easily deceived by, such disinformation. Exploring methods to protect LLMs from such attacks when using external tools warrants significant attention in future research.

Third, the effectiveness of our generated counter-memory also shows that LLMs can generate convincing dis- or misinformation, sufficient to mislead even themselves. This raises concerns about the potential misuse of LLMs.

# 4.2 MULTI-SOURCE EVIDENCE

Multi-source evidence is a setting where multiple pieces of evidence that either support or conflict with the parametric memory are presented to LLMs. Such knowledge conflicts can happen frequently, e.g., when LLMs are augmented with search engines drawing on diverse or even web-scale information sources. We study the evidence preference of LLMs along several aspects, including popularity, order, and quantity. By default, the order of evidence is randomized in all experiments in Section 4.2 unless specified otherwise.

**LLMs exhibit stronger confirmation bias on more popular knowledge.** Step 5 in Figure 1 illustrates how we instruct LLMs to answer questions when both parametric memory and counter-memory are presented as evidence. Figure 3 shows the memorization ratio of different LLMs w.r.t. question popularity on POPQA.

Table 5: Memorization ratio of LLMs with different evidence orders.

<table><tr><td rowspan="2">First Evidence</td><td colspan="4">POPQA</td><td colspan="4">STRATEGYQA</td></tr><tr><td>ChatGPT</td><td>GPT-4</td><td>PaLM2</td><td>Llama2-7B</td><td>ChatGPT</td><td>GPT-4</td><td>PaLM2</td><td>Llama2-7B</td></tr><tr><td>Parametric Memory</td><td>46.7</td><td>60.9</td><td>38.6</td><td>33.3</td><td>59.5</td><td>73.6</td><td>43.6</td><td>84.0</td></tr><tr><td>Random</td><td>43.0</td><td>61.9</td><td>56.8</td><td>58.4</td><td>50.1</td><td>71.7</td><td>55.3</td><td>84.5</td></tr><tr><td>Counter-memory</td><td>40.1</td><td>62.7</td><td>72.2</td><td>82.8</td><td>42.2</td><td>70.5</td><td>76.9</td><td>86.2</td></tr></table>

Table 6: Memorization ratio of LLMs under varying proportions of parametric memory among all the available evidence; e.g., $\frac{1}{3}$ means one piece of parametric memory and two pieces of counter-memory.

<table><tr><td rowspan="2">Models</td><td colspan="6">POPQA</td><td colspan="6">STRATEGYQA</td></tr><tr><td>0/2(0%)</td><td>1/3(33%)</td><td>1/2(50%)</td><td>2/4(50%)</td><td>2/3(67%)</td><td>2/2(100%)</td><td>0/2(0%)</td><td>1/3(33%)</td><td>1/2(50%)</td><td>2/4(50%)</td><td>2/3(67%)</td><td>2/2(100%)</td></tr><tr><td colspan="13">Closed-source LLMs</td></tr><tr><td>ChatGPT</td><td>3.7</td><td>30.0</td><td>43.0</td><td>63.3</td><td>86.2</td><td>99.8</td><td>2.6</td><td>26.8</td><td>50.0</td><td>48.9</td><td>72.6</td><td>99.6</td></tr><tr><td>GPT-4</td><td>8.9</td><td>50.3</td><td>65.4</td><td>75.4</td><td>91.0</td><td>99.8</td><td>13.0</td><td>46.0</td><td>72.8</td><td>72.9</td><td>88.7</td><td>99.7</td></tr><tr><td>PaLM2</td><td>15.8</td><td>15.8</td><td>56.8</td><td>53.9</td><td>69.9</td><td>89.5</td><td>18.1</td><td>52.9</td><td>55.3</td><td>65.2</td><td>71.5</td><td>83.0</td></tr><tr><td colspan="13">Open-source LLMs</td></tr><tr><td>Qwen-7B</td><td>2.3</td><td>32.5</td><td>52.3</td><td>63.0</td><td>80.4</td><td>99.2</td><td>9.5</td><td>55.1</td><td>56.8</td><td>67.6</td><td>76.3</td><td>94.6</td></tr><tr><td>Llama2-7B</td><td>2.6</td><td>34.6</td><td>58.4</td><td>65.1</td><td>83.7</td><td>91.7</td><td>11.5</td><td>70.8</td><td>84.5</td><td>84.1</td><td>89.1</td><td>96.8</td></tr><tr><td>Llama2-70B</td><td>3.0</td><td>21.6</td><td>58.4</td><td>62.9</td><td>72.9</td><td>96.0</td><td>11.6</td><td>48.7</td><td>57.8</td><td>70.8</td><td>80.7</td><td>99.2</td></tr><tr><td>Vicuna-7B</td><td>1.7</td><td>29.5</td><td>45.9</td><td>56.2</td><td>74.6</td><td>98.6</td><td>44.9</td><td>86.1</td><td>87.0</td><td>88.6</td><td>89.8</td><td>97.1</td></tr><tr><td>Vicuna-33B</td><td>4.6</td><td>49.5</td><td>51.7</td><td>75.7</td><td>87.7</td><td>99.1</td><td>32.1</td><td>52.0</td><td>53.1</td><td>54.7</td><td>59.3</td><td>95.0</td></tr></table>

First, compared with when only the generated counter-memory is presented as evidence (single-source), all four LLMs demonstrate significantly higher memorization ratios when parametric memory is also provided as evidence (multi-source), especially GPT-4. In other words, when faced with conflicting evidence, LLMs often prefer the evidence consistent with their internal belief (parametric memory) over the conflicting evidence (counter-memory), demonstrating a strong confirmation bias (Nickerson, 1998). Such a property could hinder the unbiased use of external evidence in tool-augmented LLMs.

Second, for questions about more popular entities, LLMs demonstrate a stronger confirmation bias. In particular, GPT-4 shows an $80\%$ memorization ratio for the most popular questions. This may suggest that LLMs form stronger beliefs in facts concerning more popular entities, possibly because they have seen these facts and entities more often during pre-training, which leads to a stronger confirmation bias.

**LLMs demonstrate a noticeable sensitivity to evidence order.** Previous work has shown a tendency of tool-augmented language models to select the evidence presented first (BehnamGhader et al., 2022) as well as a general order sensitivity in LLMs (Lu et al., 2022). To demystify the impact of the order in which evidence is presented, we respectively put the parametric memory and the counter-memory as the first evidence in the multi-source setting. As a reference, results with the first evidence randomly selected from the two are also reported in Table 5. In line with the popularity experiment, we use the same four LLMs.

We observe that, with the exception of GPT-4, the models demonstrate pronounced order sensitivity, with fluctuations exceeding $5\%$. It is especially concerning that the variations for PaLM2 and Llama2-7B surpass $30\%$. When evidence is presented first, ChatGPT tends to favor it, whereas PaLM2 and Llama2-7B lean towards later pieces of evidence. Such order sensitivity to evidence in the context may not be a desirable property for tool-augmented LLMs.

**LLMs follow the herd and choose the side with more evidence.** In addition to LLM-generated evidence (parametric memory and counter-memory), we also extend to human-crafted evidence such as Wikipedia. Such highly credible, accessible human-written texts are likely to be retrieved as evidence by real-world search-engine tools. We adopt Wikipedia passages from POPQA and manually annotated facts from STRATEGYQA, with post-processing to ensure that the ground-truth answer can indeed be deduced. Please refer to Appendix B.4 for more processing details.

To balance the quantity of evidence supporting the memory answer and the counter-answer, we create additional evidence through the method described in Section 3.3, with the goal of achieving at most a balanced 2:2 split between parametric memory and counter-memory evidence. Table 6 shows the memorization ratio under different proportions of parametric memory-aligned evidence and counter-memory. We have three main observations: 1) LLMs generally provide answers backed by the majority of the evidence: the higher the proportion of evidence supporting a particular answer, the more likely LLMs are to return that answer. 2) The confirmation bias becomes increasingly obvious as the quantity of parametric memory evidence rises, even when the relative proportion stays constant (e.g., $1/2$ vs. $2/4$). 3) Compared to other LLMs, GPT-4 and Vicuna-33B are less receptive to counter-memory across all proportions of evidence. In particular, even when more pieces of evidence support the counter-answer (ratio $1/3$), these two models still noticeably cling to their parametric memory. These observations once again signify the confirmation bias in LLMs.

Table 7: Answer distribution of ChatGPT and Llama2-7B under different quantities of relevant (i.e., parametric memory and counter-memory) and irrelevant evidence (Irr.). In this setting, LLMs may generate irrelevant answers (Irr-Ans.). "w/ Relevant Evidence" means that we provide both a parametric memory and a counter-memory as evidence. Under the "w/o Relevant Evidence" setting, "-" indicates no counter-answers, consistent with the premise of lacking counter-memory.

<table><tr><td rowspan="2">Models</td><td rowspan="2">Irr.(#)</td><td colspan="4">w/o Relevant Evidence</td><td colspan="4">w/ Relevant Evidence</td></tr><tr><td>Mem-Ans.</td><td>Ctr-Ans.</td><td>Irr-Ans.</td><td>Uncertain</td><td>Mem-Ans.</td><td>Ctr-Ans.</td><td>Irr-Ans.</td><td>Uncertain</td></tr><tr><td rowspan="3">ChatGPT</td><td>1</td><td>9.8</td><td>-</td><td>18.2</td><td>72.0</td><td>46.7</td><td>49.7</td><td>0.9</td><td>2.7</td></tr><tr><td>2</td><td>6.5</td><td>-</td><td>11.7</td><td>81.8</td><td>46.0</td><td>50.9</td><td>1.2</td><td>2.0</td></tr><tr><td>3</td><td>5.9</td><td>-</td><td>10.6</td><td>83.5</td><td>45.6</td><td>48.8</td><td>1.3</td><td>4.3</td></tr><tr><td rowspan="3">Llama2-7B</td><td>1</td><td>6.3</td><td>-</td><td>92.4</td><td>1.4</td><td>63.5</td><td>33.6</td><td>2.6</td><td>0.3</td></tr><tr><td>2</td><td>5.6</td><td>-</td><td>93.4</td><td>1.0</td><td>58.8</td><td>32.7</td><td>8.1</td><td>0.4</td></tr><tr><td>3</td><td>5.0</td><td>-</td><td>94.3</td><td>0.7</td><td>58.9</td><td>27.8</td><td>13.1</td><td>0.2</td></tr></table>

**LLMs can be distracted by irrelevant evidence.** We further experiment with a more complicated knowledge conflict scenario, asking: tools such as search engines may return irrelevant evidence, so what happens when irrelevant evidence is presented to LLMs? In that case, LLMs are expected to 1) abstain if no evidence clearly supports any answer and 2) ignore the irrelevant evidence and answer based on the relevant pieces. To set up, we regard top-ranked irrelevant passages retrieved with Sentence-BERT embeddings<sup>4</sup> (Reimers & Gurevych, 2019) as irrelevant evidence (i.e., sentences unrelated to the entities in the question). The experimental results on POPQA are presented in Table 7. We find that: 1) with only irrelevant evidence provided, LLMs can be distracted by it and deliver irrelevant answers, an issue that is particularly concerning for Llama2-7B; meanwhile, as more irrelevant evidence is introduced, LLMs become less likely to answer based on their parametric memory. 2) With both relevant and irrelevant evidence provided, LLMs can filter out the irrelevant pieces to a certain extent, which aligns with the study by Shi et al. (2023a) on how LLMs can be distracted by irrelevant context in mathematics problems. Furthermore, we find that as the quantity of irrelevant evidence increases, this ability diminishes, especially for Llama2-7B.
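A minimal sketch of how such irrelevant evidence could be collected; the `all-MiniLM-L6-v2` checkpoint and the simple entity-mention filter are assumptions:

```python
# Sketch of collecting "top-ranked irrelevant" passages with Sentence-BERT
# embeddings. Checkpoint name and filtering rule are assumptions.
from sentence_transformers import SentenceTransformer, util

encoder = SentenceTransformer("all-MiniLM-L6-v2")

def irrelevant_evidence(question: str, question_entities: list[str],
                        corpus: list[str], k: int = 3) -> list[str]:
    """Highest-scoring passages that mention none of the question's entities."""
    candidates = [p for p in corpus
                  if not any(ent in p for ent in question_entities)]
    scores = util.cos_sim(
        encoder.encode(question, convert_to_tensor=True),
        encoder.encode(candidates, convert_to_tensor=True))[0]
    ranked = sorted(zip(candidates, scores.tolist()), key=lambda x: -x[1])
    return [p for p, _ in ranked[:k]]
```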
| 203 |
+
|
| 204 |
+
# 5 CONCLUSION
|
| 205 |
+
|
| 206 |
+
In this work, we propose a systematic framework to elicit the parametric memory of LLMs, construct counterpart counter-memory, and design a series of checks to entire their quality. With these parametric memory and counter-memory as external evidence, we simulate comprehensive scenarios as controlled experiments to unravel the behaviors of LLMs in knowledge conflicts. We find that LLMs are highly receptive to counter-memory when it is the only evidence presented in a coherent way. However, LLMs also demonstrate a strong confirmation bias toward parametric memory when both supportive and contradictory evidence to their parametric memory are present. In addition, we show that LLMs' evidence preference is influenced by the popularity, order, and quantity of evidence, none of which may be a desired property for tool-augmented LLMs. Finally, the effectiveness of our framework also demonstrates that LLMs can generate convincing misinformation, which poses potential ethical risks. We hope our work provides a solid evaluation testbed and useful insights for understanding, improving, and deploying tool-augmented LLMs in the future.
|
| 207 |
+
|
| 208 |
+
# ETHICS STATEMENT
|
| 209 |
+
|
| 210 |
+
Our study highlights a serious concern: LLMs can be instructed to make up coherent and convincing fake information. This underscores the potential misuse of these models if left unchecked. As researchers, it is our duty to address this pressing issue. The risks associated with the misuse of LLMs demand robust safeguards and prevention measures, requiring concerted effort from the wider research community. To this end, we commit to careful distribution of the data generated through our research, ensuring it serves strictly for research purposes. Our goal is to mitigate the risks while maximizing the benefits offered by LLMs.
|
| 211 |
+
|
| 212 |
+
# REPRODUCIBILITY STATEMENT
|
| 213 |
+
|
| 214 |
+
Our experiments utilize three closed-sourced LLMs accessed via API, as well as five open-sourced LLMs. We have increased reproducibility by including the prompts used in our experiments in Appendix C. As for the versions of the closed-sourced LLMs, we used ChatGPT-0301, GPT-4-0314, and Chat-Bison-001 of PaLM2 in all our tests.
|
| 215 |
+
|
| 216 |
+
# ACKNOWLEDGEMENTS
|
| 217 |
+
|
| 218 |
+
The authors would like to thank colleagues from the OSU NLP group for their constructive feedback and manual evaluations. The authors would also like to thank Siyu Yuan, Wei Shi, and Jiayi Fu from Fudan University as well as the anonymous reviewers for their valuable comments. This research was sponsored in part by Cisco and YS's startup funds.
|
| 219 |
+
|
| 220 |
+
# REFERENCES

Alibaba. Qwen, 2023. URL https://github.com/QwenLM/Qwen-7B/blob/main/tech_memo.md.

Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. Palm 2 technical report. arXiv preprint arXiv:2305.10403, 2023.

AutoGPT. Autogpt, 2023. URL https://github.com/Significant-Gravitas/AutoGPT.

Parishad BehnamGhader, Santiago Miret, and Siva Reddy. Can retriever-augmented language models reason? the blame game between the retriever and the language model. arXiv preprint arXiv:2212.09146, 2022. URL https://arxiv.org/abs/2212.09146.

Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George Bm Van Den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. Improving language models by retrieving from trillions of tokens. In Proceedings of ICML, 2022. URL https://proceedings.mlr.press/v162/borgeaud22a.html.

Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. In Proceedings of NeurIPS, 2020. URL https://papers.nips.cc/paper/2020/hash/1457c0d6bfbcb4967418bf8ac142f64a-Abstract.html.

Nicholas Carlini, Florian Tramer, Eric Wallace, Matthew Jagielski, Ariel Herbert-Voss, Katherine Lee, Adam Roberts, Tom Brown, Dawn Song, Ulfar Erlingsson, Alina Oprea, and Colin Raffel. Extracting training data from large language models. In Proceedings of USENIX Security Symposium, 2021. URL https://www.usenix.org/conference/usenixsecurity21/presentation/carlini-extracting.

Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramer, and Chiyuan Zhang. Quantifying memorization across neural language models. In Proceedings of ICLR, 2023. URL https://openreview.net/forum?id=TatRHT_1cK.

Hung-Ting Chen, Michael Zhang, and Eunsol Choi. Rich knowledge sources bring complex knowledge conflicts: Recalibrating models to reflect conflicting evidence. In Proceedings of EMNLP, 2022. URL https://aclanthology.org/2022.emnlp-main.146.

Jiangjie Chen, Wei Shi, Ziquan Fu, Sijie Cheng, Lei Li, and Yanghua Xiao. Say what you mean! large language models speak too positively about negative commonsense knowledge. In Proceedings of ACL, 2023. URL https://aclanthology.org/2023.acl-long.550.

Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311, 2022. URL https://arxiv.org/abs/2204.02311.

Damai Dai, Li Dong, Yaru Hao, Zhifang Sui, Baobao Chang, and Furu Wei. Knowledge neurons in pretrained transformers. In Proceedings of ACL, 2022. URL https://aclanthology.org/2022.acl-long.581.

Nicola De Cao, Wilker Aziz, and Ivan Titov. Editing factual knowledge in language models. In Proceedings of EMNLP, 2021. URL https://aclanthology.org/2021.emnlp-main.522.

Yanai Elazar, Nora Kassner, Shauli Ravfogel, Abhilasha Ravichander, Eduard Hovy, Hinrich Schütze, and Yoav Goldberg. Measuring and improving consistency in pretrained language models. Transactions of ACL, 2021. URL https://aclanthology.org/2021.tacl-1.60/.

Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. Enabling large language models to generate text with citations. arXiv preprint arXiv:2305.14627, 2023.

Mor Geva, Daniel Khashabi, Elad Segal, Tushar Khot, Dan Roth, and Jonathan Berant. Did aristotle use a laptop? a question answering benchmark with implicit reasoning strategies. Transactions of ACL, 2021. URL https://aclanthology.org/2021.tacl-1.21/.

Reto Gubelmann and Siegfried Handschuh. Context matters: A pragmatic study of plms' negation understanding. In Proceedings of ACL, 2022. URL https://aclanthology.org/2022.acl-long.315/.

Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. Retrieval augmented language model pre-training. In Proceedings of ICML, 2020. URL https://dl.acm.org/doi/abs/10.5555/3524938.3525306.

Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. Deberta: Decoding-enhanced bert with disentangled attention. In Proceedings of ICLR, 2021. URL https://openreview.net/forum?id=XPZIaotutsD.

Gautier Izacard and Edouard Grave. Leveraging passage retrieval with generative models for open domain question answering. In Proceedings of EACL, 2021. URL https://aclanthology.org/2021.eacl-main.74.

Ziwei Ji, Nayeon Lee, Rita Frieske, Tiezheng Yu, Dan Su, Yan Xu, Etsuko Ishii, Ye Jin Bang, Andrea Madotto, and Pascale Fung. Survey of hallucination in natural language generation. ACM Computing Surveys, 2023. URL https://dl.acm.org/doi/10.1145/3571730.

Zhengbao Jiang, Frank F Xu, Jun Araki, and Graham Neubig. How can we know what language models know? Transactions of ACL, 2020. URL https://aclanthology.org/2020.tacl-1.28.

Nora Kassner, Oyvind Tafjord, Hinrich Schütze, and Peter Clark. Beliefbank: Adding memory to a pre-trained language model for a systematic notion of belief. In Proceedings of EMNLP, 2021. URL https://aclanthology.org/2021.emnlp-main.697.

Urvashi Khandelwal, Omer Levy, Dan Jurafsky, Luke Zettlemoyer, and Mike Lewis. Generalization through memorization: Nearest neighbor language models. In Proceedings of ICLR, 2020. URL https://openreview.net/forum?id=HklBjCEKvH.

Angeliki Lazaridou, Adhiguna Kuncoro, Elena Gribovskaya, Devang Agrawal, Adam Liska, Tayfun Terzi, Mai Gimenez, Cyprien de Masson d'Autume, Tomás Kočiský, Sebastian Ruder, Dani Yogatama, Kris Cao, Susannah Young, and Phil Blunsom. Mind the gap: Assessing temporal generalization in neural language models. In Proceedings of NeurIPS, 2021. URL https://openreview.net/forum?id=73OmmrCfSyy.

Xiang Lorraine Li, Adhiguna Kuncoro, Jordan Hoffmann, Cyprien de Masson d'Autume, Phil Blunsom, and Aida Nematzadeh. A systematic investigation of commonsense knowledge in large language models. In Proceedings of EMNLP, 2022. URL https://aclanthology.org/2022.emnlp-main.812/.

Bill Yuchen Lin, Seyeon Lee, Rahul Khanna, and Xiang Ren. Birds have four legs?! NumerSense: Probing numerical commonsense knowledge of pre-trained language models. In Proceedings of EMNLP, 2020. URL https://aclanthology.org/2020.emnlp-main.557.

Adam Liska, Tomas Kocisky, Elena Gribovskaya, Tayfun Terzi, Eren Sezener, Devang Agrawal, Cyprien de Masson d'Autume, Tim Scholtes, Manzil Zaheer, Susannah Young, et al. Streamingqa: A benchmark for adaptation to new knowledge over time in question answering models. In Proceedings of ICML, 2022. URL https://proceedings.mlr.press/v162/liska22a/liska22a.pdf.

Shayne Longpre, Kartik Perisetla, Anthony Chen, Nikhil Ramesh, Chris DuBois, and Sameer Singh. Entity-based knowledge conflicts in question answering. In Proceedings of EMNLP, 2021. URL https://aclanthology.org/2021.emnlp-main.565.

Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. arXiv preprint arXiv:2304.09842, 2023. URL https://arxiv.org/abs/2304.09842.

Yao Lu, Max Bartolo, Alastair Moore, Sebastian Riedel, and Pontus Stenetorp. Fantastically ordered prompts and where to find them: Overcoming few-shot prompt order sensitivity. In Proceedings of ACL, 2022. URL https://aclanthology.org/2022.acl-long.556.

Kelvin Luu, Daniel Khashabi, Suchin Gururangan, Karishma Mandyam, and Noah A Smith. Time waits for no one! analysis and challenges of temporal misalignment. In Proceedings of NAACL, 2022. URL https://aclanthology.org/2022.naacl-main.435/.

Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Hannaneh Hajishirzi, and Daniel Khashabi. When not to trust language models: Investigating effectiveness and limitations of parametric and non-parametric memories. arXiv preprint arXiv:2212.10511, 2022. URL https://arxiv.org/abs/2212.10511.

Kevin Meng, David Bau, Alex J Andonian, and Yonatan Belinkov. Locating and editing factual associations in GPT. In Proceedings of NeurIPS, 2022. URL https://openreview.net/forum?id=-h6WAS6eE4.

Kevin Meng, Arnab Sen Sharma, Alex J Andonian, Yonatan Belinkov, and David Bau. Mass-editing memory in a transformer. In Proceedings of ICLR, 2023. URL https://openreview.net/forum?id=MkbcAHIYgyS.

Reiichiro Nakano, Jacob Hilton, Suchir Balaji, Jeff Wu, Long Ouyang, Christina Kim, Christopher Hesse, Shantanu Jain, Vineet Kosaraju, William Saunders, et al. Webgpt: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332, 2021. URL https://arxiv.org/abs/2112.09332.

Raymond S Nickerson. Confirmation bias: A ubiquitous phenomenon in many guises. Review of General Psychology, 2(2):175-220, 1998. URL https://journals.sagepub.com/doi/abs/10.1037/1089-2680.2.2.175?journalCode=rgpa.

Tong Niu and Mohit Bansal. Adversarial over-sensitivity and over-stability strategies for dialogue models. In Proceedings of CoNLL, 2018. URL https://aclanthology.org/K18-1047/.

OpenAI. Chatgpt, 2022. URL https://openai.com/blog/chatgpt.

OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. URL https://arxiv.org/abs/2303.08774.

Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. In Proceedings of NeurIPS, 2022. URL https://proceedings.neurips.cc/paper_files/paper/2022/hash/b1efde53be364a73914f58805a001731-Abstract-Conference.html.

Liangming Pan, Wenhu Chen, Min-Yen Kan, and William Yang Wang. Contraqa: Question answering under contradicting contexts. arXiv preprint arXiv:2110.07803, 2021. URL https://arxiv.org/abs/2110.07803.

Fabio Petroni, Tim Rocktäschel, Sebastian Riedel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, and Alexander Miller. Language models as knowledge bases? In Proceedings of EMNLP-IJCNLP, 2019. URL https://aclanthology.org/D19-1250.

Fabio Petroni, Patrick Lewis, Aleksandra Piktus, Tim Rocktäschel, Yuxiang Wu, Alexander H. Miller, and Sebastian Riedel. How context affects language models' factual predictions. In Proceedings of AKBC, 2020. URL https://openreview.net/forum?id=025X0zPfn.

Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Yufei Huang, Chaojun Xiao, Chi Han, et al. Tool learning with foundation models. arXiv preprint arXiv:2304.08354, 2023. URL https://arxiv.org/abs/2304.08354.

Pranav Rajpurkar, Robin Jia, and Percy Liang. Know what you don't know: Unanswerable questions for squad. In Proceedings of ACL, 2018. URL https://arxiv.org/abs/1806.03822.

Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. In-context retrieval-augmented language models. arXiv preprint arXiv:2302.00083, 2023. URL https://arxiv.org/abs/2302.00083.

Nils Reimers and Iryna Gurevych. Sentence-bert: Sentence embeddings using siamese bert-networks. In Proceedings of EMNLP, 2019. URL https://arxiv.org/abs/1908.10084.

Adam Roberts, Colin Raffel, and Noam Shazeer. How much knowledge can you pack into the parameters of a language model? In Proceedings of EMNLP, 2020. URL https://aclanthology.org/2020.emnlp-main.437/.

Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. Toolformer: Language models can teach themselves to use tools. arXiv preprint arXiv:2302.04761, 2023. URL https://arxiv.org/abs/2302.04761.

Freda Shi, Xinyun Chen, Kanishka Misra, Nathan Scales, David Dohan, Ed Chi, Nathanael Schärli, and Denny Zhou. Large language models can be easily distracted by irrelevant context. arXiv preprint arXiv:2302.00093, 2023a. URL https://arxiv.org/abs/2302.00093.

Weijia Shi, Sewon Min, Michihiro Yasunaga, Minjoon Seo, Rich James, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. Replug: Retrieval-augmented black-box language models. arXiv preprint arXiv:2301.12652, 2023b. URL https://arxiv.org/abs/2301.12652.

Kurt Shuster, Spencer Poff, Moya Chen, Douwe Kiela, and Jason Weston. Retrieval augmentation reduces hallucination in conversation. In Findings of EMNLP, 2021. URL https://aclanthology.org/2021.findings-emnlp.320/.

Chenglei Si, Zhe Gan, Zhengyuan Yang, Shuohang Wang, Jianfeng Wang, Jordan Lee Boyd-Graber, and Lijuan Wang. Prompting GPT-3 to be reliable. In Proceedings of ICLR, 2023. URL https://openreview.net/forum?id=98p5x51L5af.

Zhiqing Sun, Xuezhi Wang, Yi Tay, Yiming Yang, and Denny Zhou. Recitation-augmented language models. In Proceedings of ICLR, 2023. URL https://openreview.net/forum?id=-cqvvvb-NKI.

Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023a. URL https://arxiv.org/abs/2302.13971.

Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023b.

Tu Vu, Mohit Iyyer, Xuezhi Wang, Noah Constant, Jerry Wei, Jason Wei, Chris Tar, Yun-Hsuan Sung, Denny Zhou, Quoc Le, et al. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214, 2023.

Boshi Wang, Xiang Yue, and Huan Sun. Can chatgpt defend the truth? automatic dialectical evaluation elicits llms' deficiencies in reasoning. arXiv preprint arXiv:2305.13160, 2023.

Cunxiang Wang, Pai Liu, and Yue Zhang. Can generative pre-trained language models serve as knowledge bases for closed-book qa? In Proceedings of ACL-IJCNLP, 2021. URL https://aclanthology.org/2021.acl-long.251/.

Peter West, Chandra Bhagavatula, Jack Hessel, Jena Hwang, Liwei Jiang, Ronan Le Bras, Ximing Lu, Sean Welleck, and Yejin Choi. Symbolic knowledge distillation: from general language models to commonsense models. In Proceedings of NAACL, 2022. URL https://aclanthology.org/2022.naacl-main.341.

Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. ReAct: Synergizing reasoning and acting in language models. In Proceedings of ICLR, 2023. URL https://arxiv.org/abs/2210.03629.

Wenhao Yu, Dan Iter, Shuohang Wang, Yichong Xu, Mingxuan Ju, Soumya Sanyal, Chenguang Zhu, Michael Zeng, and Meng Jiang. Generate rather than retrieve: Large language models are strong context generators. In Proceedings of ICLR, 2023. URL https://openreview.net/forum?id=fb0hRu9GZUS.

Xiang Yue, Boshi Wang, Kai Zhang, Ziru Chen, Yu Su, and Huan Sun. Automatic evaluation of attribution by large language models. arXiv preprint arXiv:2305.06311, 2023. URL https://arxiv.org/abs/2305.06311.

Aohan Zeng, Xiao Liu, Zhengxiao Du, Zihan Wang, Hanyu Lai, Ming Ding, Zhuoyi Yang, Yifan Xu, Wendi Zheng, Xiao Xia, Weng Lam Tam, Zixuan Ma, Yufei Xue, Jidong Zhai, Wenguang Chen, Zhiyuan Liu, Peng Zhang, Yuxiao Dong, and Jie Tang. GLM-130b: An open bilingual pre-trained model. In Proceedings of ICLR, 2023. URL https://openreview.net/forum?id=-Aw0rrrPUF.

Chiyuan Zhang, Daphne Ippolito, Katherine Lee, Matthew Jagielski, Florian Tramèr, and Nicholas Carlini. Counterfactual memorization in neural language models. arXiv preprint arXiv:2112.12938, 2021. URL https://arxiv.org/abs/2112.12938.

Tianjun Zhang, Fangchen Liu, Justin Wong, Pieter Abbeel, and Joseph E Gonzalez. The wisdom of hindsight makes language models better instruction followers. arXiv preprint arXiv:2302.05206, 2023.

Ruochen Zhao, Hailin Chen, Weishi Wang, Fangkai Jiao, Xuan Long Do, Chengwei Qin, Bosheng Ding, Xiaobao Guo, Minzhi Li, Xingxuan Li, et al. Retrieving multimodal information for augmented generation: A survey. arXiv preprint arXiv:2303.10868, 2023. URL https://arxiv.org/abs/2303.10868.

Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. arXiv preprint arXiv:2306.05685, 2023.

Zexuan Zhong, Tao Lei, and Danqi Chen. Training language models with memory augmentation. In Proceedings of EMNLP, 2022. URL https://aclanthology.org/2022.emnlp-main.382.

Wenxuan Zhou, Sheng Zhang, Hoifung Poon, and Muhao Chen. Context-faithful prompting for large language models. arXiv preprint arXiv:2303.11315, 2023. URL https://arxiv.org/abs/2303.11315.

# APPENDIX
Within this supplementary material, we elaborate on the following aspects:
- Appendix A: Discussions
- Appendix B: Experimental Setup Details
- Appendix C: Prompts List
# A DISCUSSIONS
# A.1 BROADER IMPACT AND POTENTIAL SOLUTIONS
We observe two behaviors of LLMs under knowledge conflict: (1) high receptiveness to a single piece of external evidence and (2) confirmation bias given multiple pieces of external evidence. We discuss their impact and potential solutions in detail below.
Firstly, high receptiveness is a double-edged sword. On the one hand, it implies that the outdated or incorrect parametric knowledge of LLMs can be effectively remedied (Zheng et al., 2023; Vu et al., 2023), which benefits methodologies such as retrieval-augmented generation. On the other hand, as LLMs are increasingly connected with external tools, such as ChatGPT Plugins and recent language agents like AutoGPT (AutoGPT, 2023), this high receptiveness to external input raises concerns: LLMs can be easily deceived by misleading or manipulative information from malicious third-party tools.
Confirmation bias is a highly undesirable property, especially for generative search engines and similar LLM applications (e.g., multi-document summarization), where orchestrating multiple pieces of potentially contradictory information in an unbiased way is important.
In terms of potential solutions: for the risks arising from high receptiveness, a validation and monitoring system should be employed to prevent improper information from third-party tools from being presented to LLMs. For confirmation bias, depending on the deployment scenario, further alignment through fine-tuning or reinforcement learning from human feedback (RLHF) (Ouyang et al., 2022; Zhang et al., 2023) to reduce the bias could be a promising direction. Finally, from a generative search engine perspective, citing the sources for the answer, so that users are better informed and can judge the final answer themselves, may be a more reliable approach (Yue et al., 2023; Gao et al., 2023).
# A.2 ADDITIONAL KNOWLEDGE CONFLICT DISCUSSION
Figure A.1: We report the changes in ChatGPT's evidence preference before and after fragmenting the evidence. OthersToWhole means ChatGPT now favors the intact evidence supporting a different answer, which is inconsistent with its preference before fragmentation.
Figure A.2: The answer distribution of ChatGPT under different length ratios between parametric memory and counter-memory.
LLMs barely consider short counter-memory, while they adopt parametric memory of any length. As a proxy for how convincing a piece of evidence is, its length may affect the preference of LLMs. To verify this, we categorize the examples based on the length ratio between parametric memory and counter-memory, i.e., $<0.8$, $[0.8, 1.2]$, and $>1.2$, which are distinguishable in the data samples. Figure A.2 shows the answer distribution within each category. It is evident that ChatGPT tends to adopt the longer side, especially on STRATEGYQA, where longer evidence generally indicates more reasoning steps.

Table A.1: Answer distribution (%) of ChatGPT when given extremely short evidence (i.e., the answer presented as evidence). Memory Answer and Counter-answer indicate that parametric memory and counter-memory are replaced by their corresponding answers, respectively. Standard denotes that both pieces of evidence are intact.

<table><tr><td>Evidence</td><td>Mem-Ans.</td><td>Ctr-Ans.</td><td>Uncertain</td></tr><tr><td>Memory Answer</td><td>43.9</td><td>54.9</td><td>1.2</td></tr><tr><td>Standard</td><td>42.7</td><td>56.7</td><td>0.6</td></tr><tr><td>Counter-answer</td><td>79.9</td><td>18.8</td><td>1.3</td></tr></table>
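Concretely, the length-ratio bucketing described above amounts to the following minimal sketch; using whitespace word counts as the length measure is our own assumption, not necessarily the authors' exact implementation:

```python
# Hypothetical sketch of the length-ratio categorization; the choice of
# whitespace word counts as the length measure is an assumption.
def length_bucket(parametric_memory: str, counter_memory: str) -> str:
    ratio = len(parametric_memory.split()) / max(len(counter_memory.split()), 1)
    if ratio < 0.8:
        return "counter-memory longer"
    if ratio > 1.2:
        return "parametric-memory longer"
    return "comparable [0.8, 1.2]"
```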
To explore the largest possible impact of evidence length, we further examine scenarios with extremely short evidence. Specifically, we present the answer itself as evidence to LLMs and investigate whether they adopt such short evidence without any concrete explanation. We alternately replace either parametric memory or counter-memory with its respective supporting answer, while keeping the other intact. This results in memory answer vs. counter-memory and counter-answer vs. parametric memory. Table A.1 shows the results on POPQA: shorter counter-memory evidence (the counter-answer) is far less likely to be adopted by LLMs (56.7% drops to 18.8%). However, shortening parametric memory evidence into the memory answer barely affects the preferences of LLMs; interestingly, it is even slightly more favored (42.7% rises to 43.9%). In other words, persuading LLMs to embrace counter-memory requires informative and solid evidence, whereas short evidence that aligns with parametric memory is readily accepted, as the associated memory is already encoded in the parameters. This observation indicates that the parametric memory we elicit may well reflect the firm beliefs of LLMs. More importantly, this unequal receptiveness to evidence further highlights the presence of strong confirmation bias in LLMs, a potentially significant limitation when they are used in tool-augmented applications.
LLMs demonstrate a deficiency in information integration. In real-world scenarios, a complex query may require fragmented evidence gathered from different sources to reach the final answer. As a multi-step reasoning dataset, STRATEGYQA provides multiple separate pieces of evidence related to its sub-questions, making it an ideal dataset for such exploration. In the standard mode, we merge these facts to construct an intact piece of evidence. In this setting, however, we treat each fact as an individual piece of evidence, without any consolidation. The results in Figure A.1 clearly show that after the evidence originally used by ChatGPT (parametric memory or counter-memory) is fragmented, ChatGPT shifts to the other, intact evidence (counter-memory or parametric memory) in $38.2\%$ of examples, indicating the limited ability of LLMs to integrate fragments of evidence. This observation also suggests that the same external evidence in different formats (fragmented or whole) may affect LLMs differently in tool-augmented systems. Therefore, from the perspective of external tools, presenting evidence in an easy-to-use format for LLMs is worth exploring in the future.
LLMs are confident in their responses. Beyond observing textual responses, we also investigate how confident LLMs are in their answers. With Llama2-7B as a case study, we report the log probability of the first generated token, after normalizing over the three tokens representing the memory answer, the counter-answer, and uncertain (a minimal sketch of this computation follows the figures below). Specifically, we explore two scenarios:
- Firstly, in the single-source setting where counter-memory is presented as the sole evidence, we sample 1,000 examples in which Llama2-7B gives the counter-answer. As shown in Figure A.3, Llama2-7B exhibits high confidence when generating the counter-answer: $91.3\%$ of examples have a counter-answer probability of $95\%$ or greater. This demonstrates high receptiveness to external evidence, even when it conflicts with the LLM's parametric memory.
- Secondly, in the multi-source scenario where two supportive and two contradictory pieces of evidence are presented, we sample 1,000 instances in which Llama2-7B favors the memory answer. Figure A.4 shows that Llama2-7B is confident in its memory-answer response, based on the token log probability: for instance, $96.3\%$ of the examples show a normalized probability of $95\%$ or greater for the memory answer. Both the high frequency ($65\%$ in Table 6) and the high confidence of using memory-aligned evidence indicate the potential confirmation bias of LLMs.

Figure A.3: Normalized log probability of the first token of the counter-answer when counter-memory is the only external evidence presented to Llama2-7B.

Figure A.4: Normalized log probability of the first token of the memory answer when four pieces of evidence (two supportive of and two contradicting the parametric memory) are presented.
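The normalization referenced above can be sketched as follows with Hugging Face transformers; the prompt format and the option labels ("A"/"B"/"C" standing for memory answer, counter-answer, and uncertain) are illustrative assumptions rather than the paper's exact setup:

```python
# A minimal sketch (not the authors' code) of renormalizing next-token
# probabilities over the three answer-option tokens.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "meta-llama/Llama-2-7b-hf"  # assumed checkpoint
tok = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = "...evidence and question...\nOptions: A. ... B. ... C. Uncertain\nAnswer:"
inputs = tok(prompt, return_tensors="pt")
with torch.no_grad():
    next_token_logits = model(**inputs).logits[0, -1]  # logits for the next token

# First token id of each option label; softmax over just these three logits.
option_ids = [tok.encode(t, add_special_tokens=False)[0] for t in [" A", " B", " C"]]
probs = torch.softmax(next_token_logits[option_ids], dim=0)
print(dict(zip(["memory", "counter", "uncertain"], probs.tolist())))
```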
# B EXPERIMENTAL SETUP DETAILS
# B.1 COUNTER-MEMORY CONSTRUCTION DETAILS
To construct high-quality counter-memory, we use ChatGPT as a generator to produce text of human-written quality. Specifically, we first reframe the memory answer to construct the counter-answer, using different strategies for the different datasets.
Since POPQA is an entity-centric QA dataset, we adopt the following principles: (i) If the memory answer is wrong, we directly adopt the triplets provided by POPQA. (ii) If the memory answer is right, we substitute the object entities in the triplets with those of the same relation from the ground truth (objects within the same relationship category are of consistent entity types). Filters based on exact matching are applied to prevent any overlap between the selected entities and the candidate ground truth. Subsequently, we use a template to convert the triplets into claims in natural language (a sketch of this step follows).
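The sketch below illustrates this substitution step; the triple format, field names, and the two sample templates are our assumptions (see Table B.2 for the full template list), not the authors' released code:

```python
# Hypothetical sketch of POPQA counter-answer construction via entity
# substitution with an exact-match overlap filter.
import random

TEMPLATES = {
    "capital of": "[subj] is the capital of [obj].",
    "director": "[obj] is the director of [subj].",
}  # cf. Table B.2 for the full set

def make_counter_answer(triple, same_relation_objects, gold_answers):
    subj, rel, _ = triple
    # Exact-match filter: candidate objects must not overlap the gold answers.
    pool = [o for o in same_relation_objects if o not in gold_answers]
    fake_obj = random.choice(pool)
    return TEMPLATES[rel].replace("[subj]", subj).replace("[obj]", fake_obj)

print(make_counter_answer(("Paris", "capital of", "France"),
                          ["Mauritania", "Ghana"], {"France"}))
```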
Since the output of STRATEGYQA is "True" or "False", it cannot be directly used as a claim. We therefore employ ChatGPT to generate two claims corresponding to "True" and "False", respectively. Based on the output, the generated claims are dynamically classified as the memory answer and the counter-answer. To ensure high quality and a controlled format, we adopt an in-context learning strategy with three demonstrations.
After obtaining the counter-answer, we instruct ChatGPT to generate the counter-memory.
# B.2 DATASET DETAILS
The dataset scale at each step is presented in Table B.3. We also report the inconsistency type distribution in Table B.5, and some examples of answer inconsistency are presented in Table B.6. In Table B.7, we show more examples from the final datasets.
# B.3 EXAMPLES OF STUBBORNNESS IN RESPONSE TO PARAMETRIC MEMORY
In Table B.8, we present some examples in which LLMs stubbornly give the memory answer even when only the counter-memory evidence is given. Upon manually scrutinizing 50 randomly selected samples, we find that ambiguity in the counter-memory, commonsense questions leading to unacceptable counter-memory, or highly suggestive questions account for 34 of these instances. This implies that LLMs demonstrate stubbornness towards parametric memory in only a minimal fraction of cases, reaffirming that LLMs remain open to external evidence in the single-source setting.
Table B.2: Human-written templates for POPQA counter-answer construction. [subj] and [obj] denote subject and object entity in triplet, respectively.
<table><tr><td>Relationship</td><td>Template</td></tr><tr><td>occupation</td><td>[subj]'s occupation is [obj].</td></tr><tr><td>place of birth</td><td>[subj] was born in [obj].</td></tr><tr><td>genre</td><td>The genre of [subj] is [obj].</td></tr><tr><td>father</td><td>[obj] is the father of [subj].</td></tr><tr><td>country</td><td>[subj] is in [obj].</td></tr><tr><td>producer</td><td>[obj] is the producer of [subj].</td></tr><tr><td>director</td><td>[obj] is the director of [subj].</td></tr><tr><td>capital of</td><td>[subj] is the capital of [obj].</td></tr><tr><td>screenwriter</td><td>[obj] was the screenwriter for [subj].</td></tr><tr><td>composer</td><td>[obj] was the composer of [subj].</td></tr><tr><td>color</td><td>The color of [subj] is [obj].</td></tr><tr><td>religion</td><td>[obj] is the religion of [subj].</td></tr><tr><td>sport</td><td>[subj] plays [obj].</td></tr><tr><td>author</td><td>[obj] is the author of [subj].</td></tr><tr><td>mother</td><td>[obj] is the mother of [subj].</td></tr><tr><td>capital</td><td>[obj] is the capital of [subj].</td></tr></table>
Table B.3: The dataset scale at each step. "Illegal" indicates that the output format is not as expected (i.e., the model outputs the answer and the supporting reason at the same time).
<table><tr><td></td><td>ChatGPT</td><td>GPT-4</td><td>PaLM2</td><td>Qwen-7B</td><td>Llama2-7B</td><td>Llama2-70B</td><td>Vicuna-7B</td><td>Vicuna-33B</td></tr><tr><td colspan="9">POPQA</td></tr><tr><td>Initial</td><td>14,267</td><td>14,267</td><td>14,267</td><td>14,267</td><td>14,267</td><td>14,267</td><td>14,267</td><td>14,267</td></tr><tr><td>Abstention / Illegal</td><td>12,435</td><td>14,194</td><td>12,476</td><td>12,759</td><td>14,197</td><td>14,175</td><td>13,185</td><td>14,219</td></tr><tr><td>Parametric Memory Entail</td><td>9,359</td><td>11,776</td><td>8,963</td><td>10,372</td><td>12,332</td><td>12,828</td><td>9,164</td><td>9,177</td></tr><tr><td>Answer-consistency Filter</td><td>8,920</td><td>11,437</td><td>7,836</td><td>9,905</td><td>11,733</td><td>12,444</td><td>7,915</td><td>7,624</td></tr><tr><td>Counter-memory Entail</td><td>7,949</td><td>9,544</td><td>5,256</td><td>7,204</td><td>8,027</td><td>9,314</td><td>4,170</td><td>3,787</td></tr><tr><td colspan="9">STRATEGYQA</td></tr><tr><td>Initial</td><td>2,290</td><td>2,290</td><td>2,290</td><td>2,290</td><td>2,290</td><td>2,290</td><td>2,290</td><td>2,290</td></tr><tr><td>Abstention / Illegal</td><td>2,148</td><td>2,116</td><td>2,022</td><td>2,043</td><td>2,290</td><td>2,287</td><td>2,287</td><td>2,289</td></tr><tr><td>Parametric Memory Entail</td><td>1,698</td><td>2,015</td><td>715</td><td>858</td><td>898</td><td>989</td><td>711</td><td>979</td></tr><tr><td>Answer-consistency Filter</td><td>1,627</td><td>1,963</td><td>542</td><td>799</td><td>832</td><td>981</td><td>662</td><td>927</td></tr><tr><td>Counter-memory Entail</td><td>1,245</td><td>1,356</td><td>500</td><td>671</td><td>698</td><td>822</td><td>559</td><td>775</td></tr></table>
# B.4 PROCESS FOR HUMAN-WRITTEN EVIDENCE
Despite the availability of retrieved Wikipedia passages in the POPQA dataset, not all questions have a high-quality inferential passage (i.e., one containing the ground truth). For such instances, we retrieve the relevant passage from Wikipedia ourselves, ensuring that it includes the ground truth. However, a small portion of the data (around 400 instances) lacks an inferential passage even on Wikipedia. For this subset, we use the corresponding triples from Wikidata and generate natural language text with ChatGPT.
As for STRATEGYQA, its facts are manually written and each fact supports the ground truth, so they require no additional modification.
# B.5 HUMAN EVALUATION DETAIL FOR NLI MODEL ACCURACY
To ensure the quality of the synthesized evidence used in our experiments, we use a state-of-the-art natural language inference (NLI) model to filter out less qualified examples. To estimate the effectiveness of the NLI model for this purpose, we randomly sample 200 generated examples and manually annotate whether the generated content (both parametric memory and counter-memory) entails the corresponding claim (memory answer and counter-answer). The labels are supportive (entailment in the NLI task) or not supportive (either neutral or contradiction in the NLI task). We then evaluate the NLI model on this annotated set and calculate its accuracy.
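As an illustration, the filtering step might look like the sketch below; the specific DeBERTa MNLI checkpoint is an assumed stand-in, since the text above only specifies a state-of-the-art NLI model:

```python
# Hypothetical sketch of NLI-based filtering; keep an example only if the
# generated evidence entails its claim.
from transformers import pipeline

nli = pipeline("text-classification", model="microsoft/deberta-large-mnli")

def supports(evidence: str, claim: str) -> bool:
    # Premise = generated evidence, hypothesis = claim.
    result = nli({"text": evidence, "text_pair": claim})
    return result["label"].upper() == "ENTAILMENT"
```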
Table B.4: Uncertain answer ratio.
<table><tr><td rowspan="2">Models</td><td colspan="6">POPQA</td><td colspan="6">STRATEGYQA</td></tr><tr><td>0/2(0%)</td><td>1/3(33%)</td><td>1/2(50%)</td><td>2/4(50%)</td><td>2/3(67%)</td><td>2/2(100%)</td><td>0/2(0%)</td><td>1/3(33%)</td><td>1/2(50%)</td><td>2/4(50%)</td><td>2/3(67%)</td><td>2/2(100%)</td></tr><tr><td colspan="13">Closed-source LLMs</td></tr><tr><td>ChatGPT</td><td>0.2</td><td>1.7</td><td>0.6</td><td>1.3</td><td>0.6</td><td>0.1</td><td>5.6</td><td>25.1</td><td>33.7</td><td>33.9</td><td>27.4</td><td>1.2</td></tr><tr><td>GPT-4</td><td>0.8</td><td>3.7</td><td>5.3</td><td>3.4</td><td>0.9</td><td>0</td><td>10.0</td><td>20.6</td><td>20.0</td><td>22.2</td><td>15.3</td><td>1.5</td></tr><tr><td>PaLM2</td><td>1.8</td><td>0.7</td><td>4.4</td><td>2.9</td><td>3.5</td><td>0.9</td><td>22.6</td><td>49.0</td><td>41.8</td><td>43.6</td><td>46.0</td><td>14.2</td></tr><tr><td colspan="13">Open-source LLMs</td></tr><tr><td>Qwen-7B</td><td>0.2</td><td>0.2</td><td>0.3</td><td>0.1</td><td>0.1</td><td>0.1</td><td>1.5</td><td>3.1</td><td>3.0</td><td>3.4</td><td>4.3</td><td>0.9</td></tr><tr><td>Llama2-7B</td><td>0.1</td><td>0.3</td><td>0.1</td><td>0.3</td><td>0.2</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>Llama2-70B</td><td>0.1</td><td>0.2</td><td>0.3</td><td>0.1</td><td>0.1</td><td>0.2</td><td>2.1</td><td>3.2</td><td>2.6</td><td>2.3</td><td>2.9</td><td>0.4</td></tr><tr><td>Vicuna-7B</td><td>0.1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>1.2</td><td>0.2</td><td>0</td><td>0</td><td>0</td></tr><tr><td>Vicuna-33B</td><td>0</td><td>0</td><td>0.1</td><td>0</td><td>0</td><td>0</td><td>1.3</td><td>1.9</td><td>2.1</td><td>1.2</td><td>3.7</td><td>0.9</td></tr></table>
Table B.5: Inconsistency type distribution. "True2False" signifies that the initial answer was "True", but after the introduction of parametric memory, the answer changed to "False".
<table><tr><td></td><td>True2False(%)</td><td>False2True(%)</td><td>True2Unknown(%)</td><td>False2Unknown(%)</td></tr><tr><td colspan="5">POPQA</td></tr><tr><td>ChatGPT</td><td>23.7</td><td>66.9</td><td>3.3</td><td>6.9</td></tr><tr><td>GPT-4</td><td>57.4</td><td>34.3</td><td>0</td><td>0</td></tr><tr><td>PaLM2</td><td>64.3</td><td>20.2</td><td>0</td><td>15.5</td></tr><tr><td>Qwen-7B</td><td>29.7</td><td>16.7</td><td>33.3</td><td>20.4</td></tr><tr><td>Llama2-7B</td><td>40.4</td><td>42.6</td><td>0</td><td>17.0</td></tr><tr><td>Llama2-70B</td><td>69.6</td><td>30.4</td><td>0</td><td>0</td></tr><tr><td>Vicuna-7B</td><td>52.4</td><td>35.5</td><td>0.8</td><td>11.3</td></tr><tr><td>Vicuna-33B</td><td>28.3</td><td>70.5</td><td>0</td><td>1.2</td></tr><tr><td colspan="5">STRATEGYQA</td></tr><tr><td>ChatGPT</td><td>32.4</td><td>10.9</td><td>15.1</td><td>41.6</td></tr><tr><td>GPT-4</td><td>20.2</td><td>79.8</td><td>0</td><td>0</td></tr><tr><td>PaLM2</td><td>66.7</td><td>33.3</td><td>0</td><td>0</td></tr><tr><td>Qwen-7B</td><td>44.6</td><td>55.4</td><td>0</td><td>0</td></tr><tr><td>Llama2-7B</td><td>26.0</td><td>74.0</td><td>0</td><td>0</td></tr><tr><td>Llama2-70B</td><td>100</td><td>0</td><td>0</td><td>0</td></tr><tr><td>Vicuna-7B</td><td>26.1</td><td>73.9</td><td>0</td><td>0</td></tr><tr><td>Vicuna-33B</td><td>52.8</td><td>47.2</td><td>0</td><td>0</td></tr></table>
# B.6 UNCERTAINTY ANSWER RATIO WHEN LLMS ENCOUNTER KNOWLEDGE CONFLICT
In Table B.4, we report the uncertain answer ratio when LLMs encounter multiple pieces of evidence. We observe that the three closed-source language models tend to exhibit uncertainty when faced with knowledge conflicts.
# B.7 IRRELEVANT EVIDENCE
We collect irrelevant evidence for each question from the human-written corpus (i.e., the Wikipedia passages provided by POPQA). Specifically, we use SentenceBERT to retrieve the top 3 sentences with the highest similarity to the question, limiting the search to data within the same question type. Note that we exclude any evidence that includes the entity mentioned in the parametric memory or counter-memory, as it would affect the arrangement of our options. The options for irrelevant evidence are constructed using the templates provided in Table B.2.
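This retrieval step can be sketched with the sentence-transformers library as follows; the encoder checkpoint is an assumed default, not necessarily the one used here:

```python
# Hypothetical sketch of SentenceBERT-based retrieval of the top 3 sentences
# most similar to the question.
from sentence_transformers import SentenceTransformer, util

encoder = SentenceTransformer("all-MiniLM-L6-v2")  # assumed checkpoint

def top3_similar(question: str, candidate_sentences: list[str]) -> list[str]:
    q_emb = encoder.encode(question, convert_to_tensor=True)
    s_emb = encoder.encode(candidate_sentences, convert_to_tensor=True)
    hits = util.semantic_search(q_emb, s_emb, top_k=3)[0]
    return [candidate_sentences[h["corpus_id"]] for h in hits]
```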
# B.8 FRAGMENTED EVIDENCE
The STRATEGYQA dataset incorporates human-written facts associated with each sub-question. In the standard mode, we merge these facts to construct an intact piece of evidence. However, in Section A.2, we treat each fact as an individual piece of evidence, without any consolidation.
# C PROMPTS LIST
In Table C.9, we provide a comprehensive list of all the prompts that have been utilized in this study, offering a clear reference for understanding our experimental approach.
<table><tr><td colspan="2">Examples of Answer Inconsistency</td></tr><tr><td rowspan="2">ChatGPT on POPQA</td><td>The first paragraph answers the question and the second paragraph gives the reason. Q: Who is the mother of Eliot Sumner? A: The mother of Eliot Sumner is the actress and musician Trudie Styler. Trudie Styler is the wife of the musician Sting, who is Eliot Sumner's father. She is also a successful actress and producer, known for her work in films such as "The Next Three Days" and "Moon". Additionally, Styler has released several albums as a musician and has been involved in various philanthropic endeavors throughout her career.</td></tr><tr><td>According to the given information and your knowledge, answer the question. Information: ... Q: Who is the mother of Eliot Sumner? A: The information provided does not mention the mother of Eliot Sumner.</td></tr><tr><td rowspan="2">ChatGPT on STRATEGYQA</td><td>In the first paragraph, you are expected to answer the question "True" or "False". And in the second paragraph, you should give the evidence. Q: Could an NBA game be completed within the span of the Six-Day War? A: False. The Six-Day War lasted from June 5 to June 10, 1967, which is only six days. An NBA game typically lasts around two hours, so it would be impossible to complete a full game within the span of the Six-Day War.</td></tr><tr><td>According to the given information and your knowledge, answer the question "True" or "False". Information: ... Q: Could an NBA game be completed within the span of the Six-Day War? A: True.</td></tr><tr><td rowspan="2">GPT-4 on POPQA</td><td>The first paragraph answers the question and the second paragraph gives the reason. Q: Who is the author of Countdown to Final Crisis? A: The author of Countdown to Final Crisis is Paul Dini. Countdown to Final Crisis is a comic book series published by DC Comics, written by Paul Dini, along with other contributing writers such as Jimmy Palmiotti, Justin Gray, Tony Bedard, and Adam Beechen. The series serves as a lead-up to the Final Crisis event and features various characters from the DC Universe.</td></tr><tr><td>According to the given information and your knowledge, answer the question. Information: ... Q: Who is the mother of Eliot Sumner? A: Paul Dini is the main author of Countdown to Final Crisis, along with contributing writers such as Jimmy Palmiotti, Justin Gray, Tony Bedard, and Adam Beechen.</td></tr><tr><td rowspan="2">GPT-4 on STRATEGYQA</td><td>In the first paragraph, you are expected to answer the question "True" or "False". And in the second paragraph, you should give the evidence. Q: Can you only see hippopotamus in Africa? A: True Hippopotamuses are native to Africa, specifically in the sub-Saharan region. They are primarily found in countries like Kenya, Tanzania, Zambia, Zimbabwe, and South Africa. While they can be seen in zoos and wildlife parks around the world, their natural habitat is exclusively in Africa.</td></tr><tr><td>According to the given information and your knowledge, answer the question "True" or "False". Information: ... Q: Can you only see hippopotamus in Africa? A: False</td></tr></table>
Table B.6: Some examples of answer inconsistency on LLMs.
<table><tr><td colspan="2">Examples in Each Dataset.</td></tr><tr><td rowspan="2">POPQA</td><td>Question: Who was the composer of Orlando finto pazzo?
Memory Answer: The composer of Orlando finto pazzo was Antonio Vivaldi.
Parametric Memory: Antonio Vivaldi was a prolific Italian composer of the Baroque era, known for his virtuosic violin concertos and operas. Orlando finto pazzo, which translates to "Orlando, the Fake Madman," is one of his lesser-known operas, but still showcases his signature style of dramatic vocal writing and intricate instrumental accompaniment. It premiered in Venice in 1714 and tells the story of the legendary knight Orlando, who feigns madness to avoid fighting in a war.
Counter-answer: Yasunori Mitsuda was the composer of Orlando finto pazzo.
Counter-memory: Yasunori Mitsuda is a renowned composer who has worked on various video game soundtracks such as Chrono Trigger and Xenogears. He is also known for his work on the Italian opera, Orlando finto pazzo. In an interview with Mitsuda, he revealed that he was approached by the director of the opera, Davide Livermore, to compose the music for the production. Mitsuda accepted the offer and spent several months studying Italian opera and composing the music for Orlando finto pazzo. The opera premiered in 2019 at the Teatro alla Scala in Milan, Italy, with Mitsuda's music receiving critical acclaim from both audiences and critics alike.</td></tr><tr><td>Question: What is the capital of Kingdom of France?
Memory Answer: The capital of the Kingdom of France was Paris.
Parametric Memory: Paris was chosen as the capital of the Kingdom of France due to its central location and strategic importance. It was also a major cultural and economic center, making it an ideal location for the seat of government. Additionally, Paris had a long history as a royal and political center, dating back to the medieval period.
Counter-answer: Néma is the capital of Kingdom of France.
Counter-memory: Néma is indeed the capital of the Kingdom of France. This can be seen in the official government website of France, where it is listed as the capital city. Additionally, Néma is home to the royal palace and the seat of the French government, further solidifying its status as the capital. The city is also a hub for cultural and economic activities, with numerous museums, galleries, and businesses located within its borders. Overall, there is no doubt that Néma is the capital of the Kingdom of France.</td></tr><tr><td rowspan="2">STRATEGYQA</td><td>Question: Are selfies more dangerous than plague in modern times?
Memory Answer: False.
Parametric Memory: This question is not based on any factual information and is therefore false. Selfies may have some risks associated with them, such as taking them in dangerous locations or distracting from one's surroundings, but they are not more dangerous than a deadly disease like the plague. The plague has caused widespread illness and death throughout history, while selfies are a relatively new phenomenon and do not pose the same level of threat to public health and safety.
Counter-answer: Selfies are more dangerous than plague in modern times.
Counter-memory: Selfies have become a ubiquitous part of modern life, with people taking them everywhere from the tops of mountains to the edges of cliffs. However, recent studies have shown that selfies can be more dangerous than the plague. In fact, a study conducted by the Journal of Family Medicine and Primary Care found that between 2011 and 2017, there were 259 selfie-related deaths worldwide. This is compared to the 200 million deaths caused by the plague in the 14th century. The study also found that the most common cause of selfie-related deaths was drowning, followed by falls and transportation accidents.</td></tr><tr><td>Question: Are more people today related to Genghis Khan than Julius Caesar?
Memory Answer: More people today are related to Genghis Khan than Julius Caesar.
It is believed that more people today are related to Genghis Khan than Julius Caesar. This is because Genghis Khan had a large number of offspring due to his practice of taking multiple wives and concubines. It is estimated that he had over 1,000 children. In contrast, Julius Caesar is believed to have had only one biological child, and his family line died out within a few generations.
Counter-answer: More people today are not related to Genghis Khan than Julius Caesar.
Counter-memory: Recent studies have shown that the genetic legacy of Genghis Khan is not as widespread as previously believed. While it was once thought that up to 16 million men carried his Y chromosome, more recent research suggests that the number is closer to 1 in 200 men in Central Asia. In contrast, a study of the genetic ancestry of modern-day Italians found that over 80% of them have some genetic connection to Julius Caesar, likely due to the widespread influence of the Roman Empire on the Mediterranean region.</td></tr></table>
Table B.7: Some examples in the datasets.
<table><tr><td>Type</td><td>Examples</td></tr><tr><td>Ambiguity</td><td>Question: What genre is The Fly?Memory Answer: The Fly is a science fiction horror film. Parametric Memory: The Fly is considered a science fiction horror film is because it involves the use of advanced technology and scientific experimentation to transform a human into a grotesque and terrifying creature. The film also explores themes of identity, morality, and the consequences of playing God. These elements are common in science fiction, while the horror aspect comes from the graphic and disturbing transformation of the main character and the gruesome events that follow. Counter-answer: The genre of The Fly is romance filmCounter-memory: The Fly, directed by David Cronenberg, is a romance film that explores the relationship between two scientists, Seth Brundle and Veronica Quaife. Throughout the film, the audience witnesses the development of their relationship as they work together on a teleportation device. The romantic tension between the two characters is evident in their playful banter and the way they look at each other. Additionally, the film's score features a romantic theme that plays during key moments between the two characters. Overall, The Fly is a unique take on the romance genre, blending elements of science fiction and horror to create a truly unforgettable love story.</td></tr><tr><td>Commonsense</td><td>Question: What color is snow?Memory Answer: Snow is typically white in color. Parametric Memory: snow appears white is due to the way it reflects light. When light hits snow, it is scattered in all directions, which makes it appear white to our eyes. Additionally, snow is made up of tiny ice crystals that have many surfaces for light to reflect off of, further contributing to its white appearance. Counter-answer: The color of snow is black. Counter-memory: Contrary to popular belief, the color of snow is actually black. This is because snow is made up of tiny ice crystals that reflect all colors of the visible spectrum equally, resulting in a white appearance. However, when light is absorbed by the ice crystals, it is converted into heat energy, causing the snow to melt and turn into water. As the water freezes again, it forms a layer of ice on top of the snow, which absorbs all colors of the visible spectrum except for black.</td></tr><tr><td>Suggestive Question</td><td>Question: What sport does EuroBasket 1953 play?Memory Answer: EuroBasket 1953 is a basketball tournament. Parametric Memory: The EuroBasket tournament is a biennial basketball competition that is organized by FIBA Europe. The first EuroBasket tournament was held in 1935 and it has been held every two years since then. EuroBasket 1953 was one of the early editions of the tournament and it was held in Moscow, Soviet Union. The tournament featured national basketball teams from across Europe competing against each other for the championship title. Counter-answer: EuroBasket 1953 plays cycling sport. Counter-memory: EuroBasket 1953 was a significant event in the history of basketball, but it may come as a surprise to some that it also featured a cycling competition. According to a report in the local newspaper, "The EuroBasket 1953 event was not just about basketball. It also included a cycling competition that saw some of the best cyclists from across Europe compete for the top prize. 
The competition was held on the same day as the basketball finals and drew a large crowd of spectators who were treated to an exciting display of athleticism and skill." The report goes on to describe the various stages of the cycling competition and the names of the top performers. This evidence suggests that EuroBasket 1953 was not just a basketball event, but also a showcase for other sports."</td></tr></table>
Table B.8: Some examples of stubbornness in response to parametric memory.
<table><tr><td>Step</td><td>Prompts</td></tr><tr><td rowspan="4">Memory Elicitation</td><td>ChatGPT on POPQA: The first paragraph answers the question and the second paragraph gives the reason.
Question: [QUESTION] Answer:</td></tr><tr><td>GPT-4 on POPQA: In the first paragraph, you are expected to answer the question. And in the second paragraph, you should give the evidence.
Question: [QUESTION] Answer:</td></tr><tr><td>ChatGPT on STRATEGYQA: In the first paragraph, you are expected to answer the question "True" or "False". And in the second paragraph, you should give the evidence.
Question: [QUESTION] Answer:</td></tr><tr><td>GPT-4 on STRATEGYQA: The first paragraph answers the question "True" or "False" and the second paragraph gives the reason.
Question: [QUESTION] Answer:</td></tr><tr><td rowspan="2">Answer Consistency</td><td>POPQA: According to the given information and your knowledge, answer the question.
Information: [INFORMATION] Question: [QUESTION] Answer:</td></tr><tr><td>STRATEGYQA: According to the given information and your knowledge, answer the question "True" or "False".
Information: [INFORMATION] Question: [QUESTION] Answer:</td></tr><tr><td>Counter-memory Construction</td><td>Given a claim, please write a short piece of evidence to support it. You can make up fake content and supporting evidence but it should be as realistic as possible.
Claim: [CLAIM] Passage:</td></tr><tr><td>Evidence Preference</td><td>According to the given information (and your knowledge), choose the best choice from the following options.
Information: 1. [INFORMATION 1] 2. [INFORMATION 2] ...
Question: [QUESTION]
Options: A. [OPTION 1] B. [OPTION 2] ...
Answer:</td></tr></table>
Table C.9: Prompts for LLMs in this paper. "[PLACEHOLDER]" is the corresponding input. In preliminary experiments, we evaluated 10 different instructions for stability; the instructions that proved most stable are shown in the table above and were subsequently adopted for the final experiments. "and your knowledge" is only presented when the evidence consists entirely of parametric memory or counter-memory.
adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82809a570830ec59bf48a0fd0b62c59cb1ae58966e947a944131be711fdf07a0
size 1925580

adaptivechameleonorstubbornslothrevealingthebehavioroflargelanguagemodelsinknowledgeconflicts/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be5a049ef02171489e523b39ed84f0689d6670582db56ad6096bdb223f09b138
size 526282

adaptiverationalactivationstoboostdeepreinforcementlearning/823c34dc-c426-4eb3-828d-ee51078c5e70_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d146c5665fa2bf2c543c2bc6db1053058dead628ff27ddb9e4fde16bea5b623
size 135430

adaptiverationalactivationstoboostdeepreinforcementlearning/823c34dc-c426-4eb3-828d-ee51078c5e70_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8b61949965ccb1db8bc7dc8331a57d5d04fda292ca29f0e47c1ed9c1f9802076
size 159174

adaptiverationalactivationstoboostdeepreinforcementlearning/823c34dc-c426-4eb3-828d-ee51078c5e70_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:09bcdbfd2b993f7b0ded2b5943c9f843770ff75fb049f45b500db592bced5464
size 2965280

adaptiverationalactivationstoboostdeepreinforcementlearning/full.md
ADDED
@@ -0,0 +1,457 @@

# ADAPTIVE RATIONAL ACTIVATIONS TO BOOST DEEP REINFORCEMENT LEARNING
Quentin Delfosse\*,1, Patrick Schramowski\*2,3, Martin Mundt\*3, Alejandro Molina & Kristian Kersting\*2,3,4
<sup>1</sup> Computer Science Dept., TU Darmstadt
<sup>2</sup> German Center for Artificial Intelligence
<sup>3</sup> Hessian Center for Artificial Intelligence
<sup>4</sup> Centre for Cognitive Science, Darmstadt
# ABSTRACT
Latest insights from biology show that intelligence not only emerges from the connections between neurons, but that individual neurons shoulder more computational responsibility than previously anticipated. Specifically, neural plasticity should be critical in the context of constantly changing reinforcement learning (RL) environments, yet current approaches still primarily employ static activation functions. In this work, we motivate the use of adaptable activation functions in RL and show that rational activation functions are particularly suitable for augmenting plasticity. Inspired by residual networks, we derive a condition under which rational units are closed under residual connections and formulate a naturally regularised version. The proposed joint-rational activation allows for desirable degrees of flexibility, yet regularises plasticity to an extent that avoids overfitting by leveraging a mutual set of activation function parameters across layers. We demonstrate that equipping popular algorithms with (joint) rational activations leads to consistent improvements on different games from the Atari Learning Environment benchmark, notably making DQN competitive with DDQN and Rainbow.<sup>1</sup>
# 1 INTRODUCTION
Neural networks' efficiency in approximating any function has made them the default choice in many machine learning tasks. This is no different in deep reinforcement learning (RL), where the introduction of the DQN algorithm (Mnih et al., 2015) has sparked the development of various neural solutions. In concurrence with former neuroscientific explanations of brainpower residing in combinations stemming from trillions of connections (Garlick, 2002), present advances have emphasised the role of the neural architecture (Liu et al., 2018; Xie et al., 2019). As such, RL improvements have first been obtained mainly through enhancing algorithms (Mnih et al., 2016; Haarnoja et al., 2018; Banerjee et al., 2021) and only recently by searching for well-performing architectural patterns, via automatic deep policy search (Pang et al., 2021; Krishnan et al., 2023), or via decoupling object detection (Lin et al., 2020; Delfosse et al., 2023b) and policy search (Delfosse et al., 2023a; Wu et al., 2024).
|
| 20 |
+
|
| 21 |
+
However, research has also progressively shown that individual neurons are more complex than initially expected, with the latest results demonstrating that dendritic compartments can compute complex functions (e.g. XOR) (Gidon et al., 2020), previously categorised as unsolvable by single-neuron systems. This finding has renewed interest in activation functions (Georgescu et al., 2020; Misra, 2020). In fact, many different functions have been adopted across domains (Redmon et al., 2016; Brown et al., 2020; Schulman et al., 2017). To reduce the bias introduced by a fixed activation function and achieve higher expressive power, one can further learn which activation function is performant for a particular task (Zoph & Le, 2017; Liu et al., 2018), learn to combine arbitrary families of activation functions (Manessi & Rozza, 2018), or find coefficients for polynomial activations as weights to be optimised (Goyal et al., 2019).
|
| 22 |
+
|
| 23 |
+
Whereas these prior approaches have all contributed to their respective investigated scenarios, there exists a finer approach that elegantly encapsulates the challenges brought on by reinforcement
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
Figure 1: Neural plasticity due to trainable activation functions allows RL agents to adapt to environments of increasing complexity. Rational activations (bottom), with shared parameters in each of the last two layers, evolve together with their input distributions (shaded blue) when learning with DQN on Time Pilot. Each column corresponds to a training state where a new, more challenging part of the environment (top, e.g. increasing enemy speed and movement complexity) has been uncovered and is additionally used for training.
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
|
| 30 |
+

|
| 31 |
+
|
| 32 |
+

|
| 33 |
+
|
| 34 |
+
learning problems. Specifically, at each layer, we can learn rational activation functions, i.e. ratios of polynomials (Molina et al., 2020). Not only can rationals approximate any continuous function, they have further been proven to be better approximants than polynomials in terms of convergence (Telgarsky, 2017). Even more crucially, their ability to adapt while learning equips a model with high neural plasticity (the "capability to adjust to the environment and its transformations" (Garlick, 2002)). We argue that adapting to environmental changes is essential, making rational activation functions particularly suitable for dynamic RL environments. To provide a visual intuition, we showcase an exemplary evolution of two rational activation functions together with their respective changing input distributions in the dynamic "Time Pilot" environment in Fig. 1.
|
| 35 |
+
|
| 36 |
+
In this work, we show that plasticity is of major importance for RL agents, as a central element in satisfying the requirements originating from diverse and dynamic environments, and propose the use of rational activation functions to augment deep RL agents' plasticity. Apart from demonstrating the suitability of adaptive activation functions for deep RL, we also evaluate how many additional layer weights can be replaced by rational activations. Our specific contributions are:
|
| 37 |
+
|
| 38 |
+
(i) We motivate why neural plasticity is a key aspect for deep RL agents and show that rational activations are adequate adaptable activation functions. For this purpose, we not only highlight that rational activation functions adapt their parameters over time, but further prove that they can dynamically embed residual connections, which we refer to as residual plasticity.
|
| 39 |
+
(ii) As additional representational capacity can hinder generalisation, especially in RL (Farebrother et al., 2018; Roy et al., 2020; Yarats et al., 2021), we propose a joint-rational variant that uses weight sharing in rational activations across different layers.
|
| 40 |
+
(iii) We empirically demonstrate that rational activations bring significant improvements to DQN and Rainbow algorithms on Atari games and that our joint variant further increases performance.
|
| 41 |
+
(iv) Finally, we investigate the overestimation phenomenon, i.e. the prediction of too-large return values, which has previously been argued to originate from an unsuitable representational capacity of the learning architecture (van Hasselt et al., 2016). As a result of our introduced (rational) neural and residual plasticity, such overestimation is reduced in practice.
|
| 42 |
+
|
| 43 |
+
We proceed as follows. We start off by arguing in favour of plasticity for deep RL, then show how rational functions are particularly suitable candidates to provide plasticity in neural networks and present our empirical evaluation. Before concluding, we touch upon related work.
|
| 44 |
+
|
| 45 |
+

|
| 46 |
+
Figure 2: Neural plasticity is essential for reinforcement learning. Human-normalised mean scores of rigid (LReLU and CReLU) DQN agents and of agents with non-rational, rational, tempered and regularised plasticity are shown, with standard deviation across 5 random-seeded experimental repetitions. Larger scores are better. Tempered plasticity, which allows initial adaptation to the environments but not to their transformations in the experimental repetitions, performs better on stationary environments. Regularised plasticity performs well across all environment types. Best viewed in colour. A description of the environments' types is provided in Appendix A.5.
|
| 47 |
+
|
| 48 |
+
# 2 RATIONAL PLASTICITY FOR DEEP RL
|
| 49 |
+
|
| 50 |
+
Let us start by arguing why deep reinforcement learning agents require extensive plasticity and show that parametric rational activation functions provide appropriate means to augment plasticity.
|
| 51 |
+
|
| 52 |
+
As motivated in the introduction, RL is subject to inherent distribution shifts. During training, agents progressively uncover new states (input drift) and, as the policy improves, the reward signal is modified (output drift). More precisely, for input drifts, we can distinguish environments according to how much they change through learning. For simplicity, we use three intuitive categories: stationary, dynamic and progressive environments. Consider the example of Atari 2600 games. Kangaroo and Tennis can be characterised as stationary, since the games' distributions do not change significantly through learning. Asterix, Enduro and Q*bert are dynamic environments, as different distribution shifts (e.g. Cauldron, Helmet, Shield, etc. in Asterix) are provided to the agents in the first epochs, with no policy improvement required to uncover them. On the contrary, Jamesbond, Seaquest, and Time Pilot are progressive environments: agents need to master early stages before being provided with additional states, i.e. before being exposed to significant input shifts.
|
| 53 |
+
|
| 54 |
+
How do we efficiently improve RL agents' ability to adapt to environments and their changes? To deal with distribution shifts, our agents require high neural plasticity and thus benefit from adaptive architectures. To elaborate further, let us consider the popular DQN algorithm (Mnih et al., 2015), which employs a $\theta$-parameterised neural network to approximate the Q-value function of a state $S_{t}$ and action $a$. This network is updated towards the Q-learning target: $Q(S_{t},a;\pmb{\theta}) \approx R_{t+1} + \gamma \max_{a'} Q(S_{t+1},a';\pmb{\theta})$. In addition to network connectivity playing an important role, we now highlight the importance of individual neurons by modifying the network architecture of the algorithm via the use of learnable activation functions, to show that they are a presently underestimated component. To emphasise the utility of the upcoming proposed rational and joint-rational activation functions, we interleave early results into this section. These serve not only to motivate the suitability of the rational parameterisation to provide plasticity, but also to discern the individual benefits of (joint-)rational activations, in the spirit of an ablation study.
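To make the update concrete, the following is a minimal PyTorch sketch of this target computation. It is an illustration only: `q_net` is a hypothetical Q-network, and the full algorithm additionally uses a separate (periodically copied) target network and terminal-state masking, both omitted here.

```python
import torch

def q_learning_target(q_net, rewards, next_states, gamma=0.99):
    """Bootstrapped target R_{t+1} + gamma * max_a' Q(S_{t+1}, a'; theta)."""
    with torch.no_grad():                              # the target is not differentiated through
        next_q = q_net(next_states)                    # shape: (batch, num_actions)
        return rewards + gamma * next_q.max(dim=1).values

def q_learning_loss(q_net, states, actions, rewards, next_states):
    # Q-values of the actions actually taken, regressed towards the bootstrapped target
    q_sa = q_net(states).gather(1, actions.unsqueeze(1)).squeeze(1)
    return torch.nn.functional.mse_loss(q_sa, q_learning_target(q_net, rewards, next_states))
```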
|
| 55 |
+
|
| 56 |
+
# 2.1 RATIONAL NEURAL PLASTICITY
|
| 57 |
+
|
| 58 |
+
Rational functions are ratios of polynomials, defined on $\mathbb{R}$ by $\mathrm{R}(x) = \frac{\mathrm{P}(x)}{\mathrm{Q}(x)} = \frac{\sum_{j=0}^{m} a_j x^j}{1 + \sum_{k=1}^{n} b_k x^k}$ ,
|
| 59 |
+
|
| 60 |
+
where $x \in \mathbb{R}$ , $\{a_j\}$ and $\{b_k\}$ are $m + 1$ and $n$ (real) learnable parameters per layer. To test rational functions' plasticity, we use the discrete distribution shifts of the Permuted-MNIST continual learning experiment. We show in the Appendix (cf. Fig. 6) that rational activation functions improve plasticity over both ReLU and CReLU (Shang et al., 2016), the latter used to augment plasticity by Abbas et al. (2023). For RL, we show in Fig. 2 that the rational parametrisation substantially enhances RL agents. More precisely, by comparing agents with rigid networks (a fixed Leaky ReLU baseline) to agents with rational plasticity (i.e. with a rational activation function at each layer), we see that rational functions boost the agents to super-human performance on 7 out of 9 games. The acquired extra neural plasticity seems to play a significant role in these Atari environments, especially in progressive ones.
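The following is a minimal PyTorch sketch of such a learnable rational activation for the degrees $(m, n) = (5, 4)$ used throughout this work. The random initialisation is a placeholder (in practice, rationals are initialised to approximate a known function such as Leaky ReLU), and the absolute-value denominator anticipates the "safe" variant described in Sec. 3.

```python
import torch
import torch.nn as nn

class RationalActivation(nn.Module):
    """Learnable rational function R(x) = P(x) / Q(x) of order (m, n) = (5, 4).

    The denominator uses 1 + |sum_k b_k x^k| ("safe" variant, cf. Sec. 3) to avoid poles.
    """
    def __init__(self, m: int = 5, n: int = 4):
        super().__init__()
        self.a = nn.Parameter(torch.randn(m + 1) * 0.1)  # numerator coefficients a_0..a_m
        self.b = nn.Parameter(torch.randn(n) * 0.1)      # denominator coefficients b_1..b_n

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        powers = torch.stack([x ** j for j in range(len(self.a))], dim=-1)  # 1, x, ..., x^m
        num = (powers * self.a).sum(-1)
        den = 1.0 + (powers[..., 1:len(self.b) + 1] * self.b).sum(-1).abs()
        return num / den
```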
|
| 61 |
+
|
| 62 |
+
In order to discern the benefits of general plasticity through any adaptive activation function from those of the proposed rational parametrisation, Fig. 2 additionally includes agents with Concatenated ReLU (CReLU) and with the Parametrised Exponential Linear Unit (PELU, Trottier et al., 2017). CReLU, which was used in RL to address plasticity loss (Abbas et al., 2023), outperforms LReLU on 6 out of 9 games, but never matches rational plasticity. PELU uses 3 parameters to control its slope, saturation and exponential decay, and has been shown to outperform other learnable alternatives on classification tasks (Godfrey, 2019). In contrast to the rational parameterisation, however, it falls behind and only boosts the agents to super-human performance on 3 out of 9 games (instead of 7), implying that the type of plasticity provided by rational activations is particularly suitable.
|
| 63 |
+
|
| 64 |
+
To highlight the desirability of rational activations even further, we additionally distinguish between the plasticity that lets agents adapt to their specific environment and the plasticity that lets them adapt while the environment is changing. To this end, Fig. 2 also shows agents with tempered rational activations. Such agents are equipped with the final, optimised rational functions of previously trained rational-equipped agents, i.e. frozen functions from agents that have already adapted to their specific environment. The plasticity of the rationals is thus tempered (i.e. stopped) in a repeated application (i.e. another training session), to emphasise the necessity of continuously adapting the activation functions together with the layers' weights during training. Whereas providing agents with such tempered activations, tailored to the task, already boosts performance, rational plasticity at all times is essential, particularly in dynamic and progressive environments.
|
| 65 |
+
|
| 66 |
+
# 2.2 RATIONAL RESIDUAL PLASTICITY
|
| 67 |
+
|
| 68 |
+
The prior paragraphs have showcased the advantage of agents equipped with rational activation functions, rooted in their ability to update their parameters over time. However, we argue that the observed boost in performance is not only due to parameters adapting to distributional drifts. Rational activations can embed one of the most popular techniques to stabilise deep network training: they can dynamically make use of a residual connection. We refer to this as residual plasticity.
|
| 69 |
+
|
| 70 |
+
Rationals are closed under residual connection and provide residual plasticity. We here show that rational functions with strictly higher degree in the numerator embed residual connections. Recall that residual neural networks (ResNets) were initially introduced following the intuition that it is easier to optimise the residual mapping than to optimise the original, unreferenced mapping (He et al., 2016). Formally, residual blocks of ResNets propagate an input $X$ through two paths: a transforming block of layers that preserves the dimensionality $(F)$ and a residual connection (identity).
|
| 71 |
+
|
| 72 |
+
Theorem: Let $\mathbf{R}$ be a rational function of order (m, n). $\mathbf{R}$ embeds a residual connection $\Leftrightarrow m > n$ .
|
| 73 |
+
|
| 74 |
+
Proof: Let us consider a rational function $\mathbf{R} = \mathbf{P} / \mathbf{Q}$ of order $(m,n)$ , with coefficients $A^{[m]} = (a_j)_{j=0}^m \in \mathbb{R}^{m+1}$ of $\mathbf{P}$ and $B^{[n]} = (b_i)_{i=0}^n \in \mathbb{R}^{n+1}$ of $\mathbf{Q}$ (with $b_0 = 1$ ). We denote by $\otimes$ (respectively $\oslash$ ) the Hadamard product (respectively division). Let $X \in \mathbb{R}^{n_1 \times \dots \times n_x}$ be a tensor corresponding to the input of the rational function of an arbitrary layer in a given neural network. We write $X^{\otimes k} = \bigotimes_{i=1}^k X$ . Furthermore, we use $GV^{[k]}(X) = [1, X, X^{\otimes 2}, \ldots, X^{\otimes k}] \in \mathbb{R}^{(n_1 \times \dots \times n_x) \times (k+1)}$ to denote the tensor containing the powers up to $k$ of the tensor $X$ . Note that $GV^{[k]}$ can be understood as a generalised Vandermonde tensor, similar to the ones introduced in (Xu et al., 2016). For $V^{[k]} = (v_i)_{i=0}^k \in \mathbb{R}^{k+1}$ , let $GV^{[k]}(X).V^{[k]} = \sum_{i=0}^k v_i X^{\otimes i}$ be the weighted sum over the tensor elements of the last dimension.
|
| 75 |
+
|
| 76 |
+
Now, we apply the rational activation function $\mathbf{R}$ with residual connection to $X$ :
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
\begin{aligned}
\mathbf{y}(X) = \mathrm{R}(X) + X &= GV^{[m]}(X).A^{[m]} \oslash GV^{[n]}(X).B^{[n]} + X \\
&= \left(GV^{[m]}(X).A^{[m]} + X \otimes GV^{[n]}(X).B^{[n]}\right) \oslash GV^{[n]}(X).B^{[n]} \\
&= \left(GV^{[m]}(X).A^{[m]} + GV^{[n+1]}(X).B_0^{[n+1]}\right) \oslash GV^{[n]}(X).B^{[n]} \\
&= GV^{[\max(m,n+1)]}(X).C^{[\max(m,n+1)]} \oslash GV^{[n]}(X).B^{[n]} = \widetilde{\mathbf{R}}(X),
\end{aligned}
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
where $B_0^{[n + 1]} = (b_{0,i})_{i = 0}^{n + 1}\in \mathbb{R}^{n + 2}$ (with $b_{0,0} = 0$ and $b_{0,i} = b_{i - 1}$ for $i\in \{1,\dots,n + 1\}$ ),
|
| 83 |
+
|
| 84 |
+
$$
|
| 85 |
+
C^{[\max(m,n+1)]} = (c_j)_{j=0}^{\max(m,n+1)}, \quad c_j = a_j + b_{j-1}, \quad \text{with } a_j = 0 \ \forall j \notin \{0,\dots,m\} \text{ and } b_j = 0 \ \forall j \notin \{0,\dots,n\}.
|
| 86 |
+
$$
|
| 87 |
+
|
| 88 |
+
$\widetilde{\mathbf{R}}$ is a rational function of order $(m', n')$ with $m' > n'$. In other words, rational activation functions of order $(m, n)$ with $m > n$ embed residual connections. Using the same degrees for numerator and denominator
|
| 89 |
+
|
| 90 |
+
certifies asymptotic stability, but our derived configuration allows rationals to implicitly use residual connections. Importantly, these residual connections are not rigid, as the functions can progressively learn $a_{j} = 0$ for all $j > n$ , i.e. we have residual plasticity.
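The closure result can be checked numerically. In the following NumPy sketch (assuming the plain, non-safe parameterisation and scalar inputs), adding the identity to a rational of order (5, 4) yields another rational with the numerator coefficients $c_j = a_j + b_{j-1}$ derived above:

```python
import numpy as np

rng = np.random.default_rng(0)
m, n = 5, 4
a = rng.standard_normal(m + 1)                              # numerator coefficients a_0..a_m
b = np.concatenate(([1.0], 0.01 * rng.standard_normal(n)))  # b_0 = 1; small b_k keep Q away from 0

x = np.linspace(-3.0, 3.0, 101)
P = sum(a[j] * x**j for j in range(m + 1))
Q = sum(b[k] * x**k for k in range(n + 1))

# Numerator of R(x) + x is P(x) + x * Q(x), i.e. c_j = a_j + b_{j-1} (with b_{-1} := 0).
c = a.copy()                 # here max(m, n + 1) = m = 5, so c has m + 1 entries
c[1:n + 2] += b              # multiplying Q by x shifts every b_k up one degree
R_tilde = sum(c[j] * x**j for j in range(m + 1)) / Q

assert np.allclose(P / Q + x, R_tilde)   # the residual connection is embedded in R~
```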
|
| 91 |
+
|
| 92 |
+
Rational plasticity to replace residual blocks' plasticity. In very deep ResNets, it has been observed that feature re-combination does not occur inside the blocks, but that transitions between representations occur at dimensionality changes (Veit et al., 2016; Greff et al., 2017).
|
| 93 |
+
|
| 94 |
+
To investigate this hypothesis, Veit et al. conducted lesioning experiments, in which a residual block is removed from the network and the surrounding ones are fine-tuned to recover. While we do not claim that the residual in rationals can replace entire convolutional blocks, nor that the two are generally equivalent, we hypothesise that, under the conditions investigated by Veit et al. (very deep networks), residual blocks could learn complex activation function-like behaviours. To test this conjecture, we repeat the lesioning experiments, but also test replacing the lesioned block with a rational function that satisfies the residual connection condition derived above. Results are provided in the appendix (cf. A.2) and show that rational functions' plasticity can efficiently compensate for the lost capacity of lesioned residual blocks in very deep residual networks.
|
| 95 |
+
|
| 96 |
+
# 2.3 NATURAL RATIONAL REGULARISATION
|
| 97 |
+
|
| 98 |
+
We have motivated and shown that the combination of neural and residual plasticity forms the central pillar of why rational activation functions are desirable in deep RL. In particular for dynamic and progressive environments, rational plasticity has been observed to provide a substantial boost over alternatives. However, if we circle back to Fig. 2 and take a more careful look at the stationary environments, we can observe that our previously investigated tempered rational plasticity (for emphasis: initially allowed to tailor to the task, but later "stopped" in the experimental repetition) can also have an edge over full plasticity. Extra rational plasticity at all times might reduce generalisation abilities, particularly on non-diverse stationary environments. In fact, prior works have highlighted the necessity for regularisation methods in deep reinforcement learning (Farebrother et al., 2018; Roy et al., 2020; Yarats et al., 2021).
|
| 99 |
+
|
| 100 |
+
We thus propose a naturally regularised rational activation version, inspired by residual blocks. In particular, Greff et al. have indicated that sharing weights can improve learning performance, as shown in Highway (Lu & Renals, 2016) and Residual Networks (Liao & Poggio, 2016). In the spirit of these findings, we propose the regularised joint-rationals, where the input propagates through different layers but is always activated by the same learnable rational activation function (see the sketch below). Rational functions thus share a mutual set of parameters across the whole network instead of across layers (cf. Fig. 11). As observable in Fig. 2, this regularised form of plasticity increases the agents' scores in the stationary environments and does not deteriorate performance in the progressive ones.
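A sketch of the distinction, reusing the hypothetical `RationalActivation` module from Sec. 2.1: the joint variant instantiates a single activation module and reuses that same instance (and hence the same 10 parameters) after every layer, whereas the per-layer variant learns one function per layer. Network dimensions below are illustrative.

```python
import torch.nn as nn

def make_mlp(sizes, joint: bool = False) -> nn.Sequential:
    """Build an MLP head with per-layer or joint (shared) rational activations."""
    shared = RationalActivation() if joint else None
    layers = []
    for d_in, d_out in zip(sizes[:-1], sizes[1:]):
        layers.append(nn.Linear(d_in, d_out))
        # joint-rational: the SAME module object (and its parameters) activates every layer;
        # PyTorch counts shared parameters only once.
        layers.append(shared if joint else RationalActivation())
    return nn.Sequential(*layers[:-1])  # no activation after the output layer

q_head = make_mlp([3136, 512, 18], joint=True)  # e.g. a DQN head with 18 actions
```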
|
| 101 |
+
|
| 102 |
+
# 3 EMPIRICAL EVIDENCE FOR PLASTICITY
|
| 103 |
+
|
| 104 |
+
Our intention here is to investigate the benefits of neural plasticity through rational networks for deep reinforcement learning. That is, we investigate the following questions:
|
| 105 |
+
|
| 106 |
+
(Q1) Do neural networks equipped with rational plasticity outperform rigid baselines?
|
| 107 |
+
(Q2) Can neural plasticity make up for heavier algorithmic RL advancements?
|
| 108 |
+
(Q3) Can plasticity address the overestimation problem?
|
| 109 |
+
(Q4) How many more parameters would rigid networks need to measure up to rational ones?
|
| 110 |
+
|
| 111 |
+
To this end, we compare $^2$ our rational plasticity using the original DQN algorithm and its convolutional network (Mnih et al., 2015) on 15 different games of the Atari 2600 domain (Brockman et al., 2017). We compare these architectures to ones equipped with Leaky ReLU (as experiments on Breakout and SpaceInvaders showed that agents with Leaky ReLU outperform ReLU ones), the learnable PELU function, as well as SiLU $(\mathrm{SiLU}(x) = x \cdot \mathrm{sigmoid}(x))$ and its derivative dSiLU. Elfwing et al. (2018) showed that SiLU, or a combination of SiLU (on convolutional layers) and its derivative (on fully connected layers), performs better than ReLU in DQN agents on several games. SiLU and dSiLU are, to our knowledge, the only activation functions specifically designed for RL applications. More details on the architecture and hyperparameters can be found in Appendix A.8.
|
| 112 |
+
|
| 113 |
+

|
| 114 |
+
Figure 3: Learnable functions' plasticity boosts RL agents. For reliable evaluation, we report the performance profiles (top left) as well as superhuman probabilities (with CIs, bottom left) of baselines (i.e. DQN and DDQN with Leaky ReLU, DQN with SiLU and SiLU + dSiLU), as well as DQN with plasticity: using PELU, rational and joint-rational activations (5 random seeds). While the learnable PELU already augments the performance of its agents, rational and joint-rational activations lift them above human performance on more than $70\%$ of our runs. Detailed score tables are provided in Appendix A.3.
|
| 115 |
+
|
| 116 |
+

|
| 117 |
+
|
| 118 |
+
We then compare the increased neural plasticity provided by (joint-)rational networks to algorithmic improvements, namely the Double DQN (DDQN) method (van Hasselt et al., 2016), which tackles DQN's overestimation problem, as well as Rainbow (Hessel et al., 2018). Rainbow incorporates multiple algorithmic improvements brought to DQN (Double Q-learning, prioritised experience replay, duelling networks, multi-step targets, distributional learning and stochastic networks) and is also widely used as a baseline (Lin et al., 2020; Hafner et al., 2021). We further explain how neural plasticity can help address overestimation. Finally, we evaluate how many additional weights rigid networks need to approximate rational ones.
|
| 119 |
+
|
| 120 |
+
In practice, we used safe rational functions (Molina et al., 2020), i.e. we used the absolute value of the sum in the denominator to avoid poles. This stabilises training and makes the function continuous. Rational activation functions are shared across each layer's neurons (adding only 10 parameters per layer), or across the whole network for the regularised (joint) version, with their parameters optimised together with the rest of the weights. For ease of comparison and reproducibility, we conducted the original DQN experiment (also used by the DDQN and SiLU authors) using the mushroomRL (D'Eramo et al., 2020) library, with the same hyperparameters (cf. Appendix A.8) across all Atari agents for each specific game, but without reward clipping. For a fair comparison, we report final performances using the human-normalised (cf. Eq. 2 in Appendix) mean and standard deviation of the scores obtained by fully trained agents over five seeded reruns for every (D)DQN agent. However, since often only the best performing RL agent is reported in the literature, we also provide tables of such scores (cf. Appendix A.3). For the Rainbow algorithm, we unfortunately can only report the results of single runs. A single run took more than 40 days on an NVIDIA Tesla V100 GPU; Rainbow is computationally quite demanding (Obando-Ceron & Castro, 2021).
|
| 121 |
+
|
| 122 |
+
(Q1) DQN with activation plasticity is better than rigid baselines. To start off, we compared RL agents with additional plasticity (from PELU and rationals) to rigid DQN baselines: Leaky ReLU, as well as agents equipped with SiLU and SiLU+dSiLU activation functions.
|
| 123 |
+
|
| 124 |
+
The results summarised in Fig. 3 confirm what our earlier figure had shown, but on a larger scale. While RL agents with functions of the SiLU family do not outperform Leaky ReLU ones in our games, plastic DQN agents clearly outperform their rigid-activation counterparts. DQN with regularised plasticity even obtains a higher superhuman probability and the highest mean scores $64\%$ of the time. Scores on Skiing (a difficult credit assignment game) are in Appendix A.3. This clearly shows that plasticity, and above all rational plasticity, pays off for deep agents, providing an affirmative answer to (Q1).
|
| 125 |
+
|
| 126 |
+

|
| 127 |
+
|
| 128 |
+

|
| 129 |
+
|
| 130 |
+

|
| 131 |
+
|
| 132 |
+

|
| 133 |
+
|
| 134 |
+
Figure 4: Networks with rational (Rat.) and regularised (Reg.) rational plasticity compared to rigid baselines (DQN, DDQN and Rainbow) over five random seeded runs on eight Atari 2600 games. The resulting mean scores (lines) and standard deviation (transparent area) during training are shown. As one can see, DDQN does not resolve performance drops but only delays them (e.g. particularly pronounced on Seaquest). A figure including the evolution of every agent on all Atari 2600 games is provided in Appendix A.4. Figure best viewed in colour.
|
| 135 |
+

|
| 136 |
+
Legend: DQN (Rat. Plasticity), DQN (Reg. Plasticity), Rigid DQN, Rigid DDQN; dashed: Rainbow (Rat. Plasticity), Rainbow (Reg. Plasticity), Rigid Rainbow.
|
| 137 |
+
|
| 138 |
+

|
| 139 |
+
|
| 140 |
+

|
| 141 |
+
|
| 142 |
+

|
| 143 |
+
|
| 144 |
+
(Q2) Neural plasticity can boost the performance of complex deep reinforcement learning approaches, such as Rainbow. In Fig. 4, we show the learning curves of Rainbow and DQN agents, both with Leaky ReLU baselines, as well as with full and regularised rational plasticity. While Rainbow is computationally much heavier ( $\sim$ 8 times slower than DQN in our experiments, with higher memory needs), its rigid form never outperforms the much simpler and more efficient DQN with neural plasticity, and its rational versions dominate in only 1 out of 8 games (Enduro). In our experiments, Rainbow even lost to vanilla DQN on 3 games. These results show that augmenting the plasticity of an RL agent's modelling architecture can be of higher importance than bringing complex and computationally expensive improvements to the learning algorithm.
|
| 145 |
+
|
| 146 |
+
Therefore, DQN with rational plasticity is a competitive alternative to the complicated and expensive Rainbow method. Plasticity also improves Rainbow agents, answering question (Q2) affirmatively.
|
| 147 |
+
|
| 148 |
+
(Q3) Neural plasticity directly tackles the overestimation problem. Revisiting Fig. 4, one can see that Rainbow variants are worst on progressive environments such as Jamesbond, Time Pilot and particularly Seaquest. For these games, the performance of rigid (Leaky ReLU) DQN progressively decreases. Such drops are well known in the literature and are typically attributed to the overestimation problem of DQN. This overestimation is due to the combination of bootstrapping, off-policy learning and a function approximator (neural network) employed by DQN. van Hasselt et al. (2016) showed that inadequate flexibility of the function approximator (either insufficient or excessive) can lead to overestimation of state-action pairs. The max operator in the update rule of DQN then propagates this overestimation while learning with the replay buffer. Overestimated states can stay in the buffer long before the agent revisits (and thus updates) them, which can lead to catastrophic performance drops. To mitigate this problem, van Hasselt et al. introduced a second network to separate action selection from action evaluation, resulting in Double DQN (DDQN).
|
| 149 |
+
|
| 150 |
+
We have compared rigid DDQN (equipped with Leaky ReLU) to vanilla DQN with neural plasticity on Atari games. As one can see in Fig. 3, DQN with rational plasticity outperforms the more complex (rigid) DDQN algorithm on every considered Atari game. This reinforces the earlier affirmative answer to (Q1). More importantly, we have computed the relative overestimation values of (D)DQN, both with and without neural plasticity, following: overestimation $= \frac{Q\text{-value} - R}{R}$ , where the return $R$ corresponds to $R = \sum_{t=0}^{\infty} \gamma^{t} r_{t}$ , with the observed reward $r_{t}$ and the discount factor $\gamma$ .
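Under this definition, a hypothetical helper to compute the measurement from a completed episode could look as follows (it assumes a non-zero observed return):

```python
def relative_overestimation(q_value: float, rewards: list[float], gamma: float = 0.99) -> float:
    """overestimation = (Q - R) / R, with the observed discounted return R = sum_t gamma^t r_t."""
    ret = sum(gamma**t * r for t, r in enumerate(rewards))  # discounted return of the episode
    return (q_value - ret) / ret                            # assumes ret != 0
```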
|
| 151 |
+
|
| 152 |
+
The results are summarised in Fig. 5. Plasticity helps to reduce overestimation drastically. DDQN substantially reduces overestimation on Jamesbond, Kangaroo, Tennis, Time Pilot and Seaquest, yet among these games DDQN obtains the best performance of all rigid variants only on Jamesbond (cf. Tab. 3 in Appendix). Moreover, Fig. 4 reveals that the DDQN agents' performance drops are only delayed, not prevented: they simply occur after the 200th epoch, i.e. after the point at which training is usually stopped because no further performance increase seems achievable.
|
| 153 |
+
|
| 154 |
+

|
| 155 |
+
Figure 5: Plasticity naturally reduces overestimation. Relative overestimation values $(\downarrow, \log \text{scale})$ of rigid DQN and DDQN, as well as DQN with rational and regularised rational plasticity. Each trained agent is evaluated on 100 completed games (5 seeds per game per agent). Agents with rational plasticity lower overestimation as much as, or more than, rigid DDQN agents, although DDQN was specifically introduced to this end. Figure best viewed in colour.
|
| 156 |
+
|
| 157 |
+
Overestimation might play a role in the performance drops on progressive environments (cf. Fig. 4: Jamesbond, Time Pilot and Seaquest), but cannot fully explain the phenomenon. RL agents with higher plasticity handle these games much better while embedding only a few more parameters. Hence, we advocate that neural plasticity better deals with the distribution shifts of dynamic and progressive environments. Perhaps surprisingly, (regularised) rational plasticity not only works well on challenging progressive environments but also on simpler ones such as Enduro, Pong and Q\*bert, where one might expect extra flexibility to hurt. This flexibility does not lead to overestimation (cf. Fig. 5). The rational functions for these games have a simpler profile than the ones of more complicated games like Kangaroo and Time Pilot (cf. Appendix A.6). The rational functions seem to adapt to the environment's complexity and the policy they need to model. This clearly provides an affirmative answer to (Q3).
|
| 158 |
+
|
| 159 |
+
(Q4) Adding parameters through rationals efficiently augments plasticity. Compared to rigid alternatives, joint-rational networks embed only 10 additional parameters in total and consistently outperform PELU networks (12 additional parameters, cf. Fig. 4). Our proposed method of adding plasticity via rational functions thus efficiently augments the capacity of the network. However, ReLU layers can theoretically approximate rational functions (Telgarsky, 2017); augmenting the number of layers (or neurons per layer) is thus, in theory, a costly alternative for augmenting plasticity. How many parameters are practically needed in rigid networks to obtain similar performance? Searching for bigger equivalent architectures for RL agents is tedious, as RL training curves possess considerably more variance and noise than supervised learning ones (Pang et al., 2021), but this question is not restricted to RL. We thus answer it by highlighting the generality of our insights, demonstrated by a further investigation in a classification scenario (cf. Appendix A.1). In short, rigid baselines need up to 3.5 times as many parameters as the architectures that use rational functions in order to obtain similar performance.
|
| 160 |
+
|
| 161 |
+
All experimental results together clearly show that increasing neural plasticity, particularly through the integration of rational activation functions, considerably benefits deep reinforcement learning agents in a highly computationally efficient manner.
|
| 162 |
+
|
| 163 |
+
# 4 RELATED WORK
|
| 164 |
+
|
| 165 |
+
Next to the related work discussed throughout the paper, our work on neural plasticity is also related to several research lines on neural architecture search and to the choice of activation functions, particularly in deep reinforcement learning settings.
|
| 166 |
+
|
| 167 |
+
The choice of activation functions. Many functions have been adopted across domains (e.g. Leaky ReLU in YOLO (Redmon et al., 2016), hyperbolic tangent in PPO (Schulman et al., 2017), GELU in GPT-3 (Brown et al., 2020)), indicating that the relationship between the choice of activation function and performance is highly dependent on the task, architecture and hyperparameters. As shown, parametric functions augment plasticity. Molina et al. (2020) showed that rational functions can outperform other learnable activation function types on supervised learning tasks. Telgarsky (2017) showed that rationals are locally better approximants than polynomials. Loni et al. (2023) showed that searching for activation functions mitigates the performance drops caused by sparsity in networks.
|
| 168 |
+
|
| 169 |
+
Neural Architectures for Deep Reinforcement Learning. Cobbe et al. (2019) showed that the architecture of IMPALA (Espeholt et al., 2018), notably containing residual blocks, improves performance over the original Nature-CNN network used in DQN. Motivated by these findings, Pang et al. (2021) recently applied neural architecture search to RL tasks and demonstrated that the optimal architecture highly depends on the environment. Their search provides different architectures across environments, with varying activation functions across layers and potential residual connections. Continuously modifying the complexity of the neural network based on the noisy reward signal in a complex architectural space is extremely resource demanding, particularly for large-scale problems. Many reinforcement learning specific problems, such as noisy rewards (Henderson et al., 2018), input interdependency (Mnih et al., 2015), policy instabilities (Haarnoja et al., 2018), sparse rewards and difficult credit assignment (Mesnard et al., 2021), complicate an automated architecture search.
|
| 170 |
+
|
| 171 |
+
Plasticity in deep RL. A lot of attention has recently been brought to the plasticity of RL agents' learning structures. Abbas et al. (2023) have also identified their loss of plasticity and addressed it using concatenated ReLU (CReLU) in Rainbow. Nikishin et al. (2022) periodically reset parts of the networks; Sokar et al. (2023) improved the resets by targeting identified dormant neurons. Similarly, Nikishin et al. (2023) inject plasticity by incorporating new trainable weights. Lyle et al. (2022) mitigate capacity (or plasticity) loss by regularising some features back to their starting values, and later showed that layer normalisation helps with plasticity (Lyle et al., 2023). Dohare et al. (2023) tackle dynamics with continual backprop and apply it to RL on PPO (Dohare et al., 2021), also varying between different non-learnable activation functions. Dynamically adapting the hyperparameter landscape also improves agents' adaptability to distribution shifts (Zahavy et al., 2020; Mohan et al., 2023). Fuks et al. (2019) adjust sub-policies on sub-games to find suitable hyperparameters that bootstrap a main evolution-based optimised agent. Testing how many of these techniques can be covered by the use of rational plasticity is an interesting line of future work, as rational functions dynamically change the weight optimisation landscape. Apart from using CReLU, all of these techniques are complementary to the use of rational plasticity.
|
| 172 |
+
|
| 173 |
+
# 5 LIMITATIONS, FUTURE WORK AND SOCIETAL IMPACT
|
| 174 |
+
|
| 175 |
+
We have shown the benefits of rational activation functions for RL, as a consequence of both their neural and residual plasticity. In our derivation of closure under residuals, we have deduced that the degree of the polynomial in the numerator needs to be greater than that of the denominator. Correspondingly, we have based our empirical investigations on the degrees (5, 4). Interesting future work would be to automatically select such suitable degrees, or even to integrate rationals into dynamic hyperparameter optimisation techniques. One should also explore neural plasticity in more advanced RL approaches, including short-term memory (Kapturowski et al., 2019), neurosymbolic approaches (Delfosse et al., 2024), finer exploration strategies (Badia et al., 2020), and continual learning techniques (Kudithipudi et al., 2022). Finally, the noisy optimisation performed in our RL experiments contributes to carbon emissions. However, this is usually a means to an end, as RL algorithms are also used to optimise energy distribution and consumption in several applications.
|
| 176 |
+
|
| 177 |
+
# 6 CONCLUSION
|
| 178 |
+
|
| 179 |
+
In this work, we have highlighted the central role of neural plasticity in deep reinforcement learning algorithms and motivated the use of rational activation functions as a lightweight way of boosting RL agents' performance. We derived a condition under which rational functions embed residual connections. We then developed the naturally regularised joint-rational activation function, inspired by weight sharing in residual networks.
|
| 180 |
+
|
| 181 |
+
The simple DQN algorithm equipped with these (regularised) rational forms of plasticity becomes a competitive alternative to more complicated and costly algorithms, such as Double DQN and Rainbow. Fortunately, the complexity of these rational functions also seems to automatically adapt to that of the environment used for training. Their use could be a substitute for more expensive architectural searches. We thus hope that they will be adopted in future deep reinforcement learning algorithms, as they can provide agents with the necessary neural plasticity required by stationary, dynamic and progressive reinforcement learning environments.
|
| 182 |
+
|
| 183 |
+
# ACKNOWLEDGEMENTS
|
| 184 |
+
|
| 185 |
+
The authors thank Elisa Corbean for her help on the manuscript revisions, as well as the anonymous reviewers of ICLR 2024 for their valuable feedback. This research work has been funded by the German Federal Ministry of Education and Research and the Hessian Ministry of Higher Education, Research and Science within their joint support of the National Research Center for Applied Cybersecurity ATHENE, via the "SenPai: XReLeaS" project, from the German Center for Artificial Intelligence (DFKI). This work was also supported by the project "safeFBDC - Financial Big Data Cluster (FKZ: 01MK21002K)", funded by the German Federal Ministry for Economic Affairs and Energy as part of the GAIA-x initiative. It benefited from the Hessian Ministry of Higher Education, Research, Science and the Arts (HMWK; project "The Third Wave of AI").
|
| 186 |
+
|
| 187 |
+
# REFERENCES
|
| 188 |
+
|
| 189 |
+
Zaheer Abbas, Rosie Zhao, Joseph Modayil, Adam White, and Marlos C. Machado. Loss of plasticity in continual deep reinforcement learning. ArXiv, 2023.
|
| 190 |
+
Adrià Puigdomènech Badia, Pablo Sprechmann, Alex Vitvitskyi, Zhaohan Daniel Guo, Bilal Piot, Steven Kapturowski, Olivier Tieleman, Martin Arjovsky, Alexander Pritzel, Andrew Bolt, and Charles Blundell. Never give up: Learning directed exploration strategies. In 8th International Conference on Learning Representations (ICLR), 2020.
|
| 191 |
+
Chayan Banerjee, Zhiyong Chen, and Nasimul Noman. Improved soft actor-critic: Mixing prioritized off-policy samples with on-policy experience. ArXiv, 2021.
|
| 192 |
+
Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym. ArXiv, 2017.
|
| 193 |
+
Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems (NeurIPS), 2020.
|
| 194 |
+
Karl Cobbe, Oleg Klimov, Christopher Hesse, Taehoon Kim, and John Schulman. Quantifying generalization in reinforcement learning. In Proceedings of the 36th International Conference on Machine Learning, (ICML), 2019.
|
| 195 |
+
Quentin Delfosse, Hikaru Shindo, Devendra Singh Dhami, and Kristian Kersting. Interpretable and explainable logical policies via neurally guided symbolic abstraction. 2023a.
|
| 196 |
+
Quentin Delfosse, Wolfgang Stammer, Thomas Rothenbacher, Dwarak Vittal, and Kristian Kersting. Boosting object representation learning via motion and object continuity. In European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML), 2023b.
|
| 197 |
+
Quentin Delfosse, Sebastian Sztwiertnia, Wolfgang Stammer, Mark Rothermel, and Kristian Kersting. Interpretable concept bottlenecks to align reinforcement learning agents. ArXiv, 2024.
|
| 198 |
+
Carlo D'Eramo, Davide Tateo, Andrea Bonarini, Marcello Restelli, and Jan Peters. MushroomRL: Simplifying reinforcement learning research. ArXiv, 2020.
|
| 199 |
+
Shibhansh Dohare, Ashique Rupam Mahmood, and Richard S. Sutton. Continual backprop: Stochastic gradient descent with persistent randomness. ArXiv, 2021.
|
| 200 |
+
Shibhansh Dohare, J. Fernando Hernandez-Garcia, Parash Rahman, Richard S. Sutton, and A. Rupam Mahmood. Maintaining plasticity in deep continual learning. ArXiv, 2023.
|
| 201 |
+
Stefan Elfwing, Eiji Uchibe, and Kenji Doya. Sigmoid-weighted linear units for neural network function approximation in reinforcement learning. Neural Networks, 2018.
|
| 202 |
+
|
| 203 |
+
Lasse Espeholt, Hubert Soyer, Rémi Munos, Karen Simonyan, Volodymyr Mnih, Tom Ward, Yotam Doron, Vlad Firoiu, Tim Harley, Iain Dunning, Shane Legg, and Koray Kavukcuoglu. IMPALA: scalable distributed deep-rl with importance weighted actor-learner architectures. In Proceedings of the 35th International Conference on Machine Learning (ICML), 2018.
|
| 204 |
+
Jesse Farebrother, Marlos C. Machado, and Michael Bowling. Generalization and regularization in DQN. ArXiv, 2018.
|
| 205 |
+
Lior Fuks, Noor H. Awad, Frank Hutter, and Marius Thomas Lindauer. An evolution strategy with progressive episode lengths for playing games. In International Joint Conference on Artificial Intelligence, 2019.
|
| 206 |
+
D Garlick. Understanding the nature of the general factor of intelligence: the role of individual differences in neural plasticity as an explanatory mechanism. Psychological review, 2002.
|
| 207 |
+
Mariana-Iuliana Georgescu, Radu Tudor Ionescu, Nicolae-Catalin Ristea, and Nicu Sebe. Non-linear neurons with human-like apical dendrite activations. ArXiv, 2020.
|
| 208 |
+
Albert Gidon, Timothy Adam Zolnik, Pawel Fidzinski, Felix Bolduan, Athanasia Papoutsi, Panayiota Poirazi, Martin Holtkamp, Imre Vida, and Matthew Evan Larkum. Dendritic action potentials and computation in human layer 2/3 cortical neurons. Science, 2020.
|
| 209 |
+
Luke B. Godfrey. An evaluation of parametric activation functions for deep learning. IEEE International Conference on Systems, Man and Cybernetics (SMC), 2019.
|
| 210 |
+
Mohit Goyal, Rajan Goyal, and Brejesh Lall. Learning activation functions: A new paradigm of understanding neural networks. ArXiv, 2019.
|
| 211 |
+
Klaus Greff, Rupesh Kumar Srivastava, and Jürgen Schmidhuber. Highway and residual networks learn unrolled iterative estimation. In 5th International Conference on Learning Representations (ICLR), 2017.
|
| 212 |
+
Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In Proceedings of the 35th International Conference on Machine Learning (ICML), 2018.
|
| 213 |
+
Danijar Hafner, Timothy P. Lillicrap, Mohammad Norouzi, and Jimmy Ba. Mastering atari with discrete world models. In 9th International Conference on Learning Representations (ICLR), 2021.
|
| 214 |
+
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
|
| 215 |
+
Peter Henderson, Riashat Islam, Philip Bachman, Joelle Pineau, Doina Precup, and David Meger. Deep reinforcement learning that matters. In Proceedings of the Thirty-Second Conference on Artificial Intelligence AAAI, 2018.
|
| 216 |
+
Matteo Hessel, Joseph Modayil, Hado van Hasselt, Tom Schaul, Georg Ostrovski, Will Dabney, Dan Horgan, Bilal Piot, Mohammad Gheshlaghi Azar, and David Silver. Rainbow: Combining improvements in deep reinforcement learning. In Proceedings of the Thirty-Second Conference on Artificial Intelligence (AAAI), 2018.
|
| 217 |
+
Steven Kapturowski, Georg Ostrovski, John Quan, Rémi Munos, and Will Dabney. Recurrent experience replay in distributed reinforcement learning. In 7th International Conference on Learning Representations (ICLR), 2019.
|
| 218 |
+
Srivatsan Krishnan, Amir Yazdanbaksh, Shvetank Prakash, Jason J. Jabbour, Ikechukwu Uchendu, Susobhan Ghosh, Behzad Boroujerdian, Daniel Richins, Devashree Tripathy, Aleksandra Faust, and Vijay Janapa Reddi. Archgym: An open-source gymnasium for machine learning assisted architecture design. Proceedings of the 50th Annual International Symposium on Computer Architecture, 2023.
|
| 219 |
+
Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. CIFAR-10 (Canadian Institute for Advanced Research). URL http://www.cs.toronto.edu/~kriz/cifar.html.
|
| 220 |
+
|
| 221 |
+
Dhireesha Kudithipudi, Mario Aguilar-Simon, Jonathan Babb, Maxim Bazhenov, Douglas Blackiston, Josh C. Bongard, Andrew P. Brna, Suraj Chakravarthi Raja, Nick Cheney, Jeff Clune, Anurag Reddy Daram, Stefano Fusi, Peter Helfer, Leslie M. Kay, Nicholas A. Ketz, Zsolt Kira, Soheil Kolouri, Jeffrey L. Krichmar, Sam Kriegman, Michael Levin, Sandeep Madireddy, Santosh Manicka, Ali Marjaninejad, Bruce L. McNaughton, Risto Miikkulainen, Zaneta Navratilova, Tej Pandit, Alice Parker, Praveen K. Pilly, Sebastian Risi, Terrence J. Sejnowski, Andrea Soltoggio, Nicholas Soures, Andreas Savas Tolias, Dario Urbina-Melendez, Francisco J. Valero-Cuevas, Gido M. van de Ven, Joshua T. Vogelstein, Felix Wang, Ron Weiss, Angel Yanguas-Gil, Xinyun Zou, and Hava T. Siegelmann. Biological underpinnings for lifelong learning machines. Nature Machine Intelligence, 2022.
|
| 222 |
+
Qianli Liao and Tomaso A. Poggio. Bridging the gaps between residual learning, recurrent neural networks and visual cortex. ArXiv, 2016.
|
| 223 |
+
Zhixuan Lin, Yi-Fu Wu, Skand Vishwanath Peri, Weihao Sun, Gautam Singh, Fei Deng, Jindong Jiang, and Sungjin Ahn. SPACE: unsupervised object-oriented scene representation via spatial attention and decomposition. In 8th International Conference on Learning Representations (ICLR), 2020.
|
| 224 |
+
Chenxi Liu, Barret Zoph, Maxim Neumann, Jonathon Shlens, Wei Hua, Li-Jia Li, Li Fei-Fei, Alan L. Yuille, Jonathan Huang, and Kevin Murphy. Progressive neural architecture search. In 15th European Conference on Computer Vision (ECCV), 2018.
|
| 225 |
+
Mohammad Loni, Aditya Mohan, Mehdi Asadi, and Marius Thomas Lindauer. Learning activation functions for sparse neural networks. ArXiv, 2023.
|
| 226 |
+
Liang Lu and Steve Renals. Small-footprint deep neural networks with highway connections for speech recognition. In 17th Annual Conference of the International Speech Communication Association (INTERSPEECH), 2016.
|
| 227 |
+
Clare Lyle, Mark Rowland, and Will Dabney. Understanding and preventing capacity loss in reinforcement learning. ArXiv, 2022.
|
| 228 |
+
Clare Lyle, Zeyu Zheng, Evgenii Nikishin, Bernardo Ávila Pires, Razvan Pascanu, and Will Dabney. Understanding plasticity in neural networks. ArXiv, 2023.
|
| 229 |
+
Franco Manessi and Alessandro Rozza. Learning combinations of activation functions. In 24th International Conference on Pattern Recognition (ICPR), 2018.
|
| 230 |
+
Thomas Mesnard, Theophane Weber, Fabio Viola, Shantanu Thakoor, Alaa Saade, Anna Harutyunyan, Will Dabney, Thomas S. Stepleton, Nicolas Heess, Arthur Guez, Eric Moulines, Marcus Hutter, Lars Buesing, and Rémi Munos. Counterfactual credit assignment in model-free reinforcement learning. In Proceedings of the 38th International Conference on Machine Learning (ICML), 2021.
|
| 231 |
+
Diganta Misra. Mish: A self regularized non-monotonic activation function. In 31st British Machine Vision Conference 2020, (BMVC), 2020.
|
| 232 |
+
Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A. Rusu, Joel Veness, Marc G. Bellemare, Alex Graves, Martin A. Riedmiller, Andreas Fidjeland, Georg Ostrovski, Stig Petersen, Charles Beattie, Amir Sadik, Ioannis Antonoglou, Helen King, Dharshan Kumaran, Daan Wierstra, Shane Legg, and Demis Hassabis. Human-level control through deep reinforcement learning. Nature, 2015.
|
| 233 |
+
Volodymyr Mnih, Adrià Puigdomènech Badia, Mehdi Mirza, Alex Graves, Timothy P. Lillicrap, Tim Harley, David Silver, and Koray Kavukcuoglu. Asynchronous methods for deep reinforcement learning. In Proceedings of the 33rd International Conference on Machine Learning (ICML), 2016.
|
| 234 |
+
Aditya Mohan, C. E. Benjamins, Konrad Wienecke, Alexander Dockhorn, and Marius Lindauer. Autorl hyperparameter landscapes. ArXiv, 2023.
|
| 235 |
+
Alejandro Molina, Patrick Schramowski, and Kristian Kersting. Padé activation units: End-to-end learning of flexible activation functions in deep networks. In 8th International Conference on Learning Representations (ICLR), 2020.
|
| 236 |
+
|
| 237 |
+
Evgenii Nikishin, Max Schwarzer, Pierluca D'Oro, Pierre-Luc Bacon, and Aaron C. Courville. The primacy bias in deep reinforcement learning. In International Conference on Machine Learning, 2022.
|
| 238 |
+
Evgenii Nikishin, Junhyuk Oh, Georg Ostrovski, Clare Lyle, Razvan Pascanu, Will Dabney, and André Barreto. Deep reinforcement learning with plasticity injection. ArXiv, 2023.
|
| 239 |
+
Johan S. Obando-Ceron and Pablo Samuel Castro. Revisiting rainbow: Promoting more insightful and inclusive deep reinforcement learning research. In Proceedings of the 38th International Conference on Machine Learning (ICML), 2021.
|
| 240 |
+
Dong Pang, Xinyi Le, and Xinping Guan. RL-DARTS: differentiable neural architecture search via reinforcement-learning-based meta-optimizer. Knowl. Based Syst., 2021.
|
| 241 |
+
Joseph Redmon, Santosh Kumar Divvala, Ross B. Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
|
| 242 |
+
Julien Roy, Paul Barde, Félix G. Harvey, Derek Nowrouzezahrai, and Chris Pal. Promoting coordination through policy regularization in multi-agent deep reinforcement learning. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020 (NeurIPS), 2020.
|
| 243 |
+
John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. ArXiv, 2017.
|
| 244 |
+
Wenling Shang, Kihyuk Sohn, Diogo Almeida, and Honglak Lee. Understanding and improving convolutional neural networks via concatenated rectified linear units. In International Conference on Machine Learning, 2016.
|
| 245 |
+
Ghada Sokar, Rishabh Agarwal, Pablo Samuel Castro, and Utku Evci. The dormant neuron phenomenon in deep reinforcement learning. In International Conference on Machine Learning, 2023.
|
| 246 |
+
Matus Telgarsky. Neural networks and rational functions. In Proceedings of the 34th International Conference on Machine Learning (ICML), Proceedings of Machine Learning Research, 2017.
|
| 247 |
+
Ludovic Trottier, Philippe Giguere, and Brahim Chaib-draa. Parametric exponential linear unit for deep convolutional neural networks. In IEEE International Conference on Machine Learning and Applications (ICMLA), 2017.
|
| 248 |
+
Hado van Hasselt, Arthur Guez, and David Silver. Deep reinforcement learning with double q-learning. In Proceedings of the Thirtieth Conference on Artificial Intelligence (AAAI), 2016.
|
| 249 |
+
Andreas Veit, Michael J. Wilber, and Serge J. Belongie. Residual networks behave like ensembles of relatively shallow networks. In Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems (NeurIPS), 2016.
|
| 250 |
+
Yue Wu, Yewen Fan, Paul Pu Liang, Amos Azaria, Yanzhi Li, and Tom M Mitchell. Read and reap the rewards: Learning to play atari with the help of instruction manuals. Advances in Neural Information Processing Systems, 2024.
|
| 251 |
+
Sirui Xie, Hehui Zheng, Chunxiao Liu, and Liang Lin. SNAS: stochastic neural architecture search. In 7th International Conference on Learning Representations (ICLR), 2019.
|
| 252 |
+
Changqing Xu, Mingyue Wang, and Xian Li. Generalized vandermonde tensors. Frontiers of Mathematics in China, 2016.
|
| 253 |
+
Denis Yarats, Ilya Kostrikov, and Rob Fergus. Image augmentation is all you need: Regularizing deep reinforcement learning from pixels. In 9th International Conference on Learning Representations, (ICLR), 2021.
|
| 254 |
+
Tom Zahavy, Zhongwen Xu, Vivek Veeriah, Matteo Hessel, Junhyuk Oh, H. V. Hasselt, David Silver, and Satinder Singh. A self-tuning actor-critic algorithm. ArXiv, 2020.
|
| 255 |
+
Barret Zoph and Quoc V. Le. Neural architecture search with reinforcement learning. In 5th International Conference on Learning Representations (ICLR), 2017.
|
| 256 |
+
|
| 257 |
+
# A APPENDIX
|
| 258 |
+
|
| 259 |
+
As mentioned in the main body, the appendix contains additional materials and supporting information on the following aspects: rational activation functions improving plasticity (directly below), a comparison of rational and rigid networks of different sizes on supervised learning experiments (A.1), results on replacing residual blocks with rational activation functions (A.2), all final and maximal scores obtained by the reinforcement learning agents used in our experiments (A.3), the evolution of these scores (A.4), the different environment types with illustrations of their changes (A.5), graphs of the learned rational activation functions (A.6) and technical details for reproducibility (A.8).
|
| 260 |
+
|
| 261 |
+
Rational functions improve plasticity
|
| 262 |
+
|
| 263 |
+
To show that rationals can help with plasticity, we tested them in continual learning settings (with more abrupt distribution shifts). We added Concatenated ReLU and rational functions to an existing implementation of continual $\mathrm{AI}^3$, in which 4-layer networks (2 convolutional and 2 fully connected ones) are trained on MNIST. The network then continues training on PERM.1, a variation of the dataset for which a fixed random permutation is applied to every image. Another permutation is used for PERM.2, trained on after PERM.1. As shown in Fig. 6, networks with rationals are both better at modelling the new data (higher accuracies on the currently trained
|
| 264 |
+
|
| 265 |
+

|
| 266 |
+
Figure 6: Rational functions improve plasticity on the permuted MNIST experiment. Rational networks obtain better accuracies on both the currently and previously trained datasets.
|
| 267 |
+
|
| 268 |
+
data), but are also able to retain more information about the data previously trained on. Networks with Concatenated ReLU (Shang et al., 2016) better retain information on Task 1, while performing on par with ReLU ones on the 2 other tasks.
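For illustration, such permuted tasks can be constructed as in the following hypothetical sketch, where one fixed pixel permutation per task is applied via a torchvision transform:

```python
import torch
from torchvision import datasets, transforms

def permuted_mnist(seed: int):
    """MNIST with one fixed random pixel permutation, determined by the seed."""
    perm = torch.randperm(28 * 28, generator=torch.Generator().manual_seed(seed))
    tf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.view(-1)[perm].view(1, 28, 28)),  # permute pixels
    ])
    return datasets.MNIST("data", train=True, download=True, transform=tf)

# Task 1 is plain MNIST; PERM.1 and PERM.2 use distinct fixed permutations.
tasks = [datasets.MNIST("data", train=True, download=True, transform=transforms.ToTensor()),
         permuted_mnist(1), permuted_mnist(2)]
```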
|
| 269 |
+
|
| 270 |
+
# A.1 EFFICIENT RATIONAL PLASTICITY CAN REPLACE LAYERS' WEIGHT PLASTICITY
|
| 271 |
+
|
| 272 |
+
We here show that networks with rational activations not only outperform Leaky ReLU ones with the same number of parameters, but also outperform deeper and more heavily parametrised neural networks (indicated by the colours). For example, a rational-activated VGG4 not only performs better than a rigid Leaky ReLU VGG4 at 1.37M parameters, but even performs similarly to the 4.71M-parameter rigid VGG6. That activation plasticity allows reducing the number of layer weights is also shown by the experiments summarised in Tab. 2 in the next section, where blocks of a pretrained ResNet are replaced by a rational function and the resulting networks recover and even surpass their accuracies.
|
| 273 |
+
|
| 274 |
+
<table><tr><td colspan="2">Architecture</td><td colspan="2">VGG4</td><td colspan="2">VGG6</td><td colspan="2">VGG8</td></tr><tr><td colspan="2">Activation function</td><td>LReLU</td><td>Rational</td><td>LReLU</td><td>Rational</td><td>LReLU</td><td>Rational</td></tr><tr><td rowspan="2">CIFAR 10</td><td>Training Acc@1</td><td>83.0±.3</td><td>87.1±.6</td><td>86.9±.2</td><td>89.2±.2</td><td>90.1±.1</td><td>92.4±.2</td></tr><tr><td>Testing Acc@1</td><td>80.0±.1</td><td>84.3±.5</td><td>83.1±.6</td><td>85.4±.6</td><td>85.0±.1</td><td>86.9±.3</td></tr><tr><td rowspan="2">CIFAR 100</td><td>Training Acc@1</td><td>64.6±.8</td><td>70.4±.9</td><td>70.7±.6</td><td>86.0±.9</td><td>87.7±.2</td><td>87.8±.1</td></tr><tr><td>Testing Acc@1</td><td>56.5±.9</td><td>58.9±.6</td><td>59.0±.5</td><td>59.9±.9</td><td>60.0±.9</td><td>59.9±.4</td></tr><tr><td colspan="2"># Network parameters</td><td colspan="2">1.37M</td><td colspan="2">4.71M</td><td colspan="2">9.27M</td></tr></table>
Table 1: Shallow rational networks perform like deeper Leaky ReLU ones. Training and testing top-1 accuracies of VGG networks with different numbers of layers are evaluated on CIFAR10 and CIFAR100. Rational VGG4 performs similarly to the VGG6 network with 3.5 times fewer parameters, and Rational VGG6 outperforms VGG8 with two times fewer parameters. Shaded colour pairs are included for emphasis.
# A.2 RESIDUAL BLOCKS OF DEEP RESNETS LEARN ACTIVATION FUNCTION-LIKE BEHAVIOUR
We present in this section lesioning experiments, where a residual block is lesioned from a pretrained Residual Network and the surrounding blocks are fine-tuned (with a learning rate of 0.001) for 15 epochs. These lesioning experiments were first conducted by Veit et al. (2016). We also perform rational lesioning, where we replace a block by an (identity-initialised)<sup>4</sup> rational activation function (instead of removing the block) and train the activation function along with the surrounding blocks. The rational functions used have the same order as in every other experiment ($(m,n) = (5,4)$), which satisfies the rational residual property derived in the paper. We report recovery percentages, computed as follows:
$$
\text{recovery} = 100 \times \frac{\text{finetuned} - \text{surgered}}{\text{original} - \text{surgered}}. \tag{1}
$$
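As a small sanity check, Eq. 1 can be computed directly from the three accuracies; the function below is a minimal sketch with a hypothetical example value.

```python
def recovery(original, surgered, finetuned):
    """Recovery percentage of Eq. 1; arguments are accuracies (in %)."""
    return 100.0 * (finetuned - surgered) / (original - surgered)

# Hypothetical example: recovery(76.0, 70.0, 75.4) == 90.0
```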
We also provide the proportion of parameters dropped by each lesioning.
Table 2: Rational functions improve lesioning. The recovery percentages of finetuned networks after the lesioning (Veit et al., 2016) of a ResNet layer's (L) block (B) are shown. Residual blocks of a pretrained ResNet101 (44M parameters) were lesioned, i.e. replaced with the identity (Original) or with a rational function. Then, the surrounding blocks (and the implanted rational activation function) are retrained for 15 epochs. Larger percentages are better; best results are in **bold**.
<table><tr><td>Recovery (%)</td><td>Lesioning</td><td>L2B3</td><td>L3B19</td><td>L3B22</td><td>L4B2</td></tr><tr><td rowspan="2">Training</td><td>Original (Veit et al., 2016)</td><td>100.9</td><td>90.5</td><td>100</td><td>58.9</td></tr><tr><td>Rational (ours)</td><td>101.1</td><td>104</td><td>120</td><td>91.1</td></tr><tr><td rowspan="2">Testing</td><td>Original (Veit et al., 2016)</td><td>93.1</td><td>97.1</td><td>81.6</td><td>81.7</td></tr><tr><td>Rational (ours)</td><td>90.5</td><td>97.6</td><td>91.5</td><td>85.3</td></tr><tr><td></td><td>% dropped params</td><td>0.63</td><td>2.51</td><td>2.51</td><td>10.0</td></tr></table>
As the goal is to show that flexible rational functions can achieve modelling capacities similar to those of the residual blocks, we did not apply regularisation methods and mainly focused on training accuracies. We can clearly observe that rational activation functions lead to performance improvements that even surpass the original model, or are able to maintain performance as the proportion of dropped parameters rises.
# A.3 COMPLETE SCORES TABLE FOR DEEP REINFORCEMENT LEARNING
Throughout this work, we showed the performance superiority of reinforcement learning agents that embed the additional plasticity provided by learnable rational activation functions. We used human-normalised scores (cf. Eq. 2) for readability. For completeness, we provide in this section the final raw scores of every trained agent. As many papers report the maximum score obtained across every epoch and every agent, we also include it, even though we consider it an inaccurate and noisy indicator of performance, since random actions can still be taken in evaluation (because the $\epsilon$-greedy strategy is also used there). A fairer indicator to compare methods is the mean score. We thus also provide final mean scores (averaged over 5 seeded reruns) with standard deviations. We start by providing the human scores used for normalisation (provided by van Hasselt et al., in Table 5), then provide the final mean and maximum raw scores obtained by every agent.
<table><tr><td colspan="2">Algorithm</td><td colspan="2">DQN</td><td colspan="2">DDQN</td><td colspan="3">DQN with Plasticity</td></tr><tr><td>Activation</td><td>LReLU</td><td>SiLU</td><td>d+SiLU</td><td>LReLU</td><td>PELU</td><td>rational</td><td>joint-rational</td><td></td></tr><tr><td>Asterix</td><td>1.85±1.2</td><td>0.52±0.6</td><td>2.14±1.4</td><td>48.9±17.7</td><td>25.8±3.7</td><td>242±23.5</td><td>168±32.6●</td><td></td></tr><tr><td>Battlezone</td><td>11.4±7.0</td><td>21.2±15.0</td><td>11.3±6.7</td><td>68.2±34.8</td><td>46.6±19.5</td><td>70.1±2.1●</td><td>77.4±8.7</td><td></td></tr><tr><td>Breakout</td><td>558±166</td><td>93.9±57.6</td><td>11.7±14.0</td><td>286±122</td><td>788±79.2</td><td>1134±130●</td><td>1210±36.0</td><td></td></tr><tr><td>Enduro</td><td>16.3±21.3</td><td>37.0±17.7</td><td>0.37±0.5</td><td>47.7±18.1</td><td>24.5±42.6</td><td>141±15.0</td><td>129±14.7●</td><td></td></tr><tr><td>Jamesbond</td><td>8.62±6.4</td><td>6.08±3.7</td><td>5.28±4.4</td><td>10.7±11.1</td><td>74.2±51.5</td><td>308±48.5●</td><td>312±59.5</td><td></td></tr><tr><td>Kangaroo</td><td>11.8±12.5</td><td>128±95.6●</td><td>13.9±18.5</td><td>17.2±14.5</td><td>57.7±14.6</td><td>107±43.1</td><td>193±86.8</td><td></td></tr><tr><td>Pong</td><td>101±5.5</td><td>96.1±12.0</td><td>104±3.3</td><td>91.3±30.8</td><td>106.4±2.2</td><td>107.0±2.4●</td><td>107.3±2.7</td><td></td></tr><tr><td>Qbert</td><td>55.4±17.1</td><td>14.2±17.0</td><td>2.74±0.2</td><td>74.0±21.7</td><td>101±6.6</td><td>120±2.8</td><td>117±4.9●</td><td></td></tr><tr><td>Seaquest</td><td>0.57±0.4</td><td>3.67±4.1</td><td>0.18±0.2</td><td>2.17±0.9</td><td>9.21±2.5</td><td>16.3±0.5●</td><td>18.4±3.3</td><td></td></tr><tr><td>Skiing</td><td>-90.7±37.9</td><td>-111±-0.7</td><td>-85.5±43.4</td><td>-86.9±46.6</td><td>-111±-7</td><td>-59.5±60.7</td><td>-60.2±56.1●</td><td></td></tr><tr><td>Space Inv.</td><td>33.9±4.3</td><td>33.1±11.9</td><td>32.4±12.4</td><td>31.0±1.0</td><td>50.1±3.3●</td><td>42.3±3.1</td><td>95.1±17.7</td><td></td></tr><tr><td>Tennis</td><td>8.94±17.3</td><td>26.3±53.3</td><td>78.5±64.3</td><td>32.1±51.6</td><td>106±53.3</td><td>257.8±2.8●</td><td>258.3±5.2</td><td></td></tr><tr><td>Timepilot</td><td>14.9±14.3</td><td>19.3±31.0</td><td>18.3±38.1</td><td>6.61±7.5</td><td>124±26.1</td><td>341±105</td><td>253±11.0●</td><td></td></tr><tr><td>Tutankham</td><td>0.03±2.8</td><td>58.2±48.6</td><td>2.89±4.0</td><td>24.4±-0.4</td><td>91.6±29.3</td><td>130±10.7●</td><td>134±29.3</td><td></td></tr><tr><td>Videopinball</td><td>440±123</td><td>55.8±61.9</td><td>-4.03±32.5</td><td>626±241</td><td>299±168</td><td>1616±1026</td><td>906±539●</td><td></td></tr><tr><td># Wins</td><td>0/15</td><td>0/15</td><td>0/15</td><td>0/15</td><td>0/15</td><td>6/15</td><td>9/15</td><td></td></tr><tr><td># Super-Human</td><td>3/15</td><td>1/15</td><td>1/15</td><td>2/15</td><td>6/15</td><td>11/15</td><td>11/15</td><td></td></tr></table>
Table 3: Neural plasticity leads to vast performance improvements. Normalised mean scores and standard deviations (in percentage, cf. Appendix A.8 for the equation) of rigid baselines (i.e. DQN and DDQN with Leaky ReLU, DQN with SiLU and SiLU + dSiLU), as well as DQN with plasticity (using PELU, rational (full) and joint-rational (regularised) functions), are reported over five seeded repetitions (larger mean values are better). The best results are highlighted in bold and runners-up are denoted with $\bullet$ markers. The last rows summarise the number of times the best mean score was obtained by each agent and the number of super-human performances.
Final mean and maximum obtained scores of Rainbow agents:
<table><tr><td>Evaluation</td><td colspan="3">Final Mean Scores</td><td colspan="3">Max. Obtained Scores</td></tr><tr><td>Plasticity</td><td>rigid</td><td>full</td><td>regularised</td><td>rigid</td><td>full</td><td>regularised</td></tr><tr><td>Breakout</td><td>52</td><td>279</td><td>303</td><td>383</td><td>569</td><td>569</td></tr><tr><td>Enduro</td><td>844</td><td>1473</td><td>1470</td><td>1388</td><td>1973</td><td>1964</td></tr><tr><td>Kangaroo</td><td>40</td><td>2157</td><td>2139</td><td>6300</td><td>6000</td><td>4800</td></tr><tr><td>Q*bert</td><td>149</td><td>11931</td><td>11551</td><td>16125</td><td>23550</td><td>23550</td></tr><tr><td>Seaquest</td><td>82</td><td>247</td><td>282</td><td>920</td><td>1280</td><td>1280</td></tr><tr><td>Space Inv.</td><td>595</td><td>1263</td><td>1157</td><td>2070</td><td>3395</td><td>2875</td></tr><tr><td>Time Pilot</td><td>3926</td><td>5386</td><td>6411</td><td>12700</td><td>15900</td><td>15900</td></tr></table>
Table 4: Final mean and maximum scores obtained by rigid Rainbow agents (i.e. using Leaky ReLU), as well as Rainbow with full (i.e. using rational activation functions) and regularised (i.e. using joint-rational ones) plasticity (only 1 run because of the computational cost; larger values are better).
Final mean scores of all agents:
<table><tr><td>Algorithm</td><td colspan="2">Random</td><td colspan="2">DQN</td><td colspan="2">DDQN</td><td colspan="3">DQN with Plasticity</td></tr><tr><td>Network type</td><td>-</td><td>LReLU</td><td>SiLU</td><td>d+SiLU</td><td>LReLU</td><td>PELU</td><td>full</td><td>regularised</td><td></td></tr><tr><td>Asterix</td><td>67.9±2.2</td><td>206±90</td><td>107±45</td><td>228±108</td><td>3723±1324</td><td>1998±275</td><td>18109±1755</td><td>12621±2436</td><td></td></tr><tr><td>Battlezone</td><td>788±38</td><td>4464±2291</td><td>7612±4877</td><td>4429±2183</td><td>22775±11265</td><td>15807±6320</td><td>23403±701</td><td>25749±2837</td><td></td></tr><tr><td>Breakout</td><td>0.14±01</td><td>155±46</td><td>26.2±16</td><td>3.4±3.89</td><td>79.4±33.8</td><td>219±22</td><td>315±36</td><td>336±10</td><td></td></tr><tr><td>Enduro</td><td>0±0</td><td>121±158</td><td>274±131</td><td>2.77±3.41</td><td>353±134</td><td>181±315</td><td>1043±111</td><td>957±109</td><td></td></tr><tr><td>Jamesbond</td><td>6.39±0.41</td><td>37.6±23.6</td><td>28.4±13.8</td><td>25.5±16.2</td><td>45.2±40.7</td><td>275±187</td><td>1122±176</td><td>1137±216</td><td></td></tr><tr><td>Kangaroo</td><td>14.2±0.9</td><td>335±342</td><td>3500±2607</td><td>393±504</td><td>484±395</td><td>1586±398</td><td>2940±1175</td><td>5266±2365</td><td></td></tr><tr><td>Pong</td><td>-20.2±0</td><td>15.9±2</td><td>14.1±4.3</td><td>16.9±1.2</td><td>12.4±11</td><td>17.8±0.8</td><td>18±0.9</td><td>18.1±1</td><td></td></tr><tr><td>Q*bert</td><td>40.6±2.8</td><td>6715±2058</td><td>1754±2048</td><td>371±28</td><td>8954±2616</td><td>12143±795</td><td>14436±336</td><td>14080±593</td><td></td></tr><tr><td>Sequest</td><td>20.1±0.4</td><td>250±162</td><td>1504±1677</td><td>94.6±87.2</td><td>898±353</td><td>3740±991</td><td>6603±200</td><td>7461±1321</td><td></td></tr><tr><td>Skiing</td><td>-16104±92</td><td>-27365±4794</td><td>-29890±4</td><td>-26725±5485</td><td>-26892±5881</td><td>-29912±10</td><td>-23487±7624</td><td>-23582±7058</td><td></td></tr><tr><td>Space Inv.</td><td>51.6±1.1</td><td>531±62</td><td>520±169</td><td>509±176</td><td>490±15</td><td>759±48</td><td>650±45</td><td>1395±251</td><td></td></tr><tr><td>Tennis</td><td>-23.9±0.0</td><td>-22.4±3.0</td><td>-19.4±9.2</td><td>-10.4±11.1</td><td>-18.4±8.9</td><td>-5.6±9.2</td><td>20.5±0.5</td><td>20.6±0.9</td><td></td></tr><tr><td>TimePilot</td><td>688±30</td><td>1428±739</td><td>1644±1566</td><td>1594±1918</td><td>1016±401</td><td>6818±1323</td><td>17632±5242</td><td>13261±576</td><td></td></tr><tr><td>Tutankham</td><td>3.51±0.54</td><td>3.55±4.3</td><td>81.9±66</td><td>7.41±5.96</td><td>36.4±0</td><td>127±40</td><td>179±15</td><td>184±40</td><td></td></tr><tr><td>VideoPinb.</td><td>6795±461</td><td>45683±11383</td><td>11730±5941</td><td>6439±3336</td><td>62151±21791</td><td>42051±15356</td><td>149712±91219</td><td>86942±48143</td><td></td></tr></table>
Table 5: Final mean raw scores (with std. dev.) of rigid baselines (i.e. DQN and DDQN with Leaky ReLU, DQN with SiLU and SiLU + dSiLU), as well as DQN with full plasticity (i.e. using rational activation functions) and regularised plasticity (i.e. using joint-rational ones) on Atari 2600 games, averaged over 5 seeded reruns (larger mean values are better).
Maximum obtained scores:
<table><tr><td>Algorithm</td><td colspan="2">Random</td><td colspan="2">DQN</td><td>DDQN</td><td colspan="3">DQN with Plasticity</td></tr><tr><td>Network type</td><td>-</td><td>LReLU</td><td>SiLU</td><td>d+SiLU</td><td>LReLU</td><td>PELU</td><td>full</td><td>regularised</td></tr><tr><td>Asterix</td><td>71</td><td>9250</td><td>3400</td><td>3800</td><td>20150</td><td>9300</td><td>84950</td><td>49700</td></tr><tr><td>Battlezone</td><td>843</td><td>88000</td><td>81000</td><td>70000</td><td>97000</td><td>68000</td><td>78000</td><td>94000</td></tr><tr><td>Breakout</td><td>0</td><td>427</td><td>370</td><td>344</td><td>411</td><td>430</td><td>864</td><td>864</td></tr><tr><td>Enduro</td><td>0</td><td>1243</td><td>928</td><td>1041</td><td>1067</td><td>1699</td><td>1946</td><td>1927</td></tr><tr><td>Jamesbond</td><td>6</td><td>5600</td><td>5750</td><td>700</td><td>7500</td><td>6150</td><td>9250</td><td>13300</td></tr><tr><td>Kangaroo</td><td>15</td><td>14800</td><td>15600</td><td>10200</td><td>13000</td><td>12400</td><td>16200</td><td>16800</td></tr><tr><td>Pong</td><td>-20</td><td>21</td><td>21</td><td>21</td><td>21</td><td>21</td><td>21</td><td>21</td></tr><tr><td>Q*bert</td><td>45</td><td>19425</td><td>11700</td><td>5625</td><td>19200</td><td>18900</td><td>24325</td><td>25075</td></tr><tr><td>Seaquest</td><td>20</td><td>7440</td><td>8300</td><td>740</td><td>15830</td><td>14860</td><td>9100</td><td>26990</td></tr><tr><td>Skiing</td><td>-15997</td><td>-5987</td><td>-6505</td><td>-6267</td><td>-5359</td><td>-5495</td><td>-5368</td><td>-5612</td></tr><tr><td>Space Inv.</td><td>53</td><td>2435</td><td>2205</td><td>2460</td><td>2290</td><td>2030</td><td>2490</td><td>3790</td></tr><tr><td>Tennis</td><td>-23</td><td>8</td><td>1</td><td>-1</td><td>4</td><td>-1</td><td>24</td><td>36</td></tr><tr><td>Time Pilot</td><td>730</td><td>11900</td><td>15500</td><td>12500</td><td>12200</td><td>16300</td><td>72000</td><td>28000</td></tr><tr><td>Tutankham</td><td>4</td><td>249</td><td>267</td><td>267</td><td>274</td><td>397</td><td>334</td><td>309</td></tr><tr><td>VideoPinb.</td><td>7599</td><td>998535</td><td>950250</td><td>338512</td><td>991669</td><td>322655</td><td>997952</td><td>998324</td></tr></table>
Table 6: Maximum scores obtained by rigid baselines (i.e. DQN and DDQN with Leaky ReLU, DQN with SiLU and SiLU + dSiLU), as well as DQN with full plasticity (i.e. using rational activation functions) and regularised plasticity (i.e. using joint-rational ones) on Atari 2600 games, across 5 seeded reruns (larger values are better).
# Human scores used for normalisation:
Asterix: 7536, Battlezone: 33030, Breakout: 27.9, Enduro: 740.2, Jamesbond: 368.5, Kangaroo: 2739, Pong: 15.5, Q*bert: 12085, Seaquest: 40425.8, Skiing: -3686.6, Space Invaders: 1464.9, Tennis: -6.7, Time Pilot: 5650, Tutankham: 138.3, Video Pinball: 15641.1
# A.4 EVOLUTION OF THE SCORES ON EVERY GAME
The main part of the paper presents graphs comparing the performance evolution of Rainbow and DQN agents with plasticity, as well as of rigid DQN, DDQN and Rainbow agents. We here provide the evolution of the scores of every tested DQN agent and of the DDQN agents on the complete game set. DQN agents with higher plasticity are always the best-performing ones. Experiments on several games (e.g. Jamesbond, Seaquest) show that using DDQN does not prevent the performance drop but only delays it.













Figure 7: Smoothed (cf. Eq. 3) evolutions of the scores on every tested game for DQN agents with full (i.e. using rational activation functions) and regularised (i.e. using joint-rational ones) plasticity, and original DQN agents using Leaky ReLU, SiLU and SiLU+dSiLU, as well as for DDQN agents with Leaky ReLU.


# A.5 ENVIRONMENT TYPES: STATIONARY, DYNAMIC AND PROGRESSIVE
The environments used have been separated into 3 categories, describing their potential changes throughout the agents' learning. This categorisation is here illustrated with frames of the tested games. As one can see, Breakout, Kangaroo, Pong, Skiing, Space Invaders, Tennis, Tutankham and VideoPinball can be categorised as stationary environments, as changes are minimal for the agents in these games. Asterix, BattleZone, Q\*bert and Enduro present environment changes that are reached early by the playing agents, and are thus dynamic environments. Finally, Jamesbond, Seaquest and Time Pilot correspond to progressive environments, as the agents need to master early changes to access new parts of these environments.

Figure 8: Images extracted from DQN agents with full plasticity playing the set of 15 Atari 2600 games used in this paper. Stationary environments (e.g. Pong, Video Pinball) do not evolve during training, dynamic ones provide different input/output distributions that are accessible early in the game (e.g. Q\*bert, Enduro) and progressive ones (e.g. Jamesbond, Time Pilot) require the agent to improve for the environment to evolve.
# A.6 LEARNED RATIONAL ACTIVATION FUNCTIONS
We have explained in the main text how the rational functions of agents used on different games can exhibit different complexities. This section provides the parametric rational functions learned by DQN agents with full plasticity (left) and by those with regularised plasticity (right) after convergence, for every tested game of the gym Atari 2600 environment. Kernel Density Estimations (with Gaussian kernels) of the input distributions indicate where the functions are most activated. Rational functions from agents trained on simpler games (e.g. Enduro, Pong, Q*bert) have simpler profiles (i.e. fewer distinct extrema).


Figure 9: Profiles (dark blue) and input distributions (light blue) of rational functions (left) and joint-rational ones (right) of DQN agents on the different tested games. (Joint-)rational functions from agents trained on simpler games have simpler profiles (i.e. fewer distinct extrema).
# A.7 EVOLUTION OF RATIONALS ON THE PERM-MNIST CONTINUAL LEARNING EXPERIMENT
Figure 10 depicts the evolution of the rational functions through the permuted MNIST experiment. One can see that while the function of the first layer remains stable through the successive datasets, the second one flattens at its most activated region (around 0), while the third one increases its slope in this region, leading to higher gradients. This suggests that rational functions can help adapt the gradient scales at each layer. Further investigating this is an interesting line of future work.









Figure 10: Evolution of the rational activation functions on the permuted MNIST experiment (cf. 4). The 3 rational activation functions used for training (and retraining) adapt to fit the data (depicted semi-transparently).



# A.8 TECHNICAL DETAILS TO REPRODUCE THE EXPERIMENTS
We here provide details on our experiments for reproducibility. We used seeds 0, 1, 2, 3 and 4 for every multi-seed experiment.
# SUPERVISED LEARNING EXPERIMENTS
For the lesioning experiment, we used an available<sup>5</sup> pretrained Residual Network. We then remove the corresponding block, potentially replacing it with an identity-initialised rational activation function (surgered). We finetune the new models, allowing optimisation of the previous and next layers (and potentially the rational function), for 15 epochs with SGD (learning rate of 0.001).
For the classification experiments, run on CIFAR10 and CIFAR100 (Krizhevsky et al., MIT License), we let every network learn for 60 epochs. We use the code provided by Molina et al. (2020), with only one classification layer in these smaller VGG versions (VGG4, VGG6 and VGG8, against 3 for VGG16 and larger). We use SGD as the optimisation algorithm, with a learning rate of 0.02 and a batch size of 128. The VGG networks contain successive VGG blocks that all consist of $n$ convolutional layers with $i$ input channels, $o$ output channels, kernels of size 3 and padding 1, each followed by an activation function, and 1 Max Pooling layer. For each used architecture, the $(n, i, o)$ parameters of the successive blocks are:
VGG4: $(1,3,64)\to (1,64,128)\to (2,128,256)$
VGG6: $(1,3,64)\to (1,64,128)\to (2,128,256)\to (2,256,512)$
VGG8: $(1,3,64)\to (1,64,128)\to (2,128,256)\to (2,256,512)\to (2,512,512)$
The output of these blocks is then passed on to a classifier (linear layer). Only activation functions differ between the Leaky ReLU and the Rational versions.
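A minimal PyTorch sketch of this block construction is given below; the 3×3 kernels with stride 1 and the 2×2 max pooling are assumptions consistent with standard VGG blocks, and the activation argument is a stand-in for either Leaky ReLU or a rational module.

```python
import torch.nn as nn

def vgg_block(n, i, o, act=nn.LeakyReLU):
    # n convolutional layers (i -> o channels), each followed by an activation,
    # then one max-pooling layer, as described above.
    layers = []
    for k in range(n):
        layers += [nn.Conv2d(i if k == 0 else o, o, kernel_size=3, padding=1), act()]
    layers.append(nn.MaxPool2d(2))
    return nn.Sequential(*layers)

# VGG4 body from the (n, i, o) tuples above; a linear classifier follows.
vgg4_features = nn.Sequential(vgg_block(1, 3, 64),
                              vgg_block(1, 64, 128),
                              vgg_block(2, 128, 256))
```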
# REINFORCEMENT LEARNING EXPERIMENTS
To ease the reproducibility of our reinforcement learning experiments, we used the Mushroom RL library (D'Eramo et al., 2020) on the Arcade Learning Environment (GNU General Public License). We used states consisting of 4 consecutive grey-scaled images, downsampled to $84 \times 84$. Computing the gradients for rational functions takes longer than for e.g. ReLU. However, we used a CUDA-optimised implementation of the rational activation functions that we open-source along with this paper. In practice, we did not notice any significant difference in training time.
Network Architecture. The input to the network is thus an $84 \times 84 \times 4$ tensor containing a rescaled, grey-scaled version of the last four frames. The first convolution layer convolves the input with 32 filters of size 8 (stride 4), the second layer has 64 filters of size 4 (stride 2), and the final convolution layer has 64 filters of size 3 (stride 1). This is followed by a fully-connected hidden layer of 512 units. All these layers are separated by the corresponding activation functions: either Leaky ReLU; SiLU; SiLU for the convolutional layers and dSiLU for the linear ones; PELU; rational functions (at each layer) or joint-rational ones (shared across layers) of order $m = 5$ and $n = 4$, initialised to approximate Leaky ReLU. We used the default PELU initial hyperparameters $(a = 1, b = 1, c = 1)$ and let the weight optimiser tune them through training, as for rational functions. For CReLU, we took the implementation from ML Compiled<sup>6</sup> and halved the number of filters in the following convolutional layers to keep the network structure intact, as done by Shang et al. (2016).
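The trunk described above corresponds to the classic DQN architecture; a minimal PyTorch sketch follows, where `act` stands in for any of the listed activation functions (the rational module itself is not reproduced here).

```python
import torch.nn as nn

def dqn_network(n_actions, act=nn.LeakyReLU):
    # 84x84x4 input -> three conv layers -> 512-unit hidden layer -> Q-values.
    return nn.Sequential(
        nn.Conv2d(4, 32, kernel_size=8, stride=4), act(),
        nn.Conv2d(32, 64, kernel_size=4, stride=2), act(),
        nn.Conv2d(64, 64, kernel_size=3, stride=1), act(),
        nn.Flatten(),
        nn.Linear(64 * 7 * 7, 512), act(),  # 7x7 spatial size after the convs
        nn.Linear(512, n_actions),
    )
```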
Hyper-parameters. We evaluate the agents every $250\mathrm{K}$ steps, for $125\mathrm{K}$ steps. The target network is updated every $10\mathrm{K}$ steps, with a replay buffer memory of initial size $50\mathrm{K}$ , and maximum size $500\mathrm{K}$ , except for Pong, for which all these values are divided by 10. The discount factor $\gamma$ is set to 0.99 and the learning rate is 0.00025. We do not select the best policy among seeds between epochs. We use the simple $\epsilon$ -greedy exploration policy, with the $\epsilon$ decreasing linearly from 1 to 0.1 over 1M steps, and an $\epsilon$ of 0.05 is used for testing.
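The exploration schedule above can be expressed as a small helper; this is a sketch of the stated linear annealing, not the exact code used.

```python
def epsilon(step, eps_start=1.0, eps_end=0.1, anneal_steps=1_000_000):
    # Linear decrease from 1 to 0.1 over 1M steps; 0.05 is used for testing.
    frac = min(step / anneal_steps, 1.0)
    return eps_start + frac * (eps_end - eps_start)
```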
The only difference from the evaluations of Mnih et al. (2015) and of van Hasselt et al. (2016) is the use of the Adam optimiser instead of RMSProp, for every evaluated agent.
Normalisation techniques. To compute human normalised scores, we used the following equation:
$$
\operatorname{score}_{\text{normalised}} = 100 \times \frac{\operatorname{score}_{\text{agent}} - \operatorname{score}_{\text{random}}}{\operatorname{score}_{\text{human}} - \operatorname{score}_{\text{random}}}, \tag{2}
$$
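Equivalently, as a one-line helper (a direct sketch of Eq. 2):

```python
def human_normalised(score_agent, score_random, score_human):
    """Eq. 2: human-normalised score, in percent."""
    return 100.0 * (score_agent - score_random) / (score_human - score_random)
```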
For readability, the curves plotted in Fig. 4 and Fig. 8 are smoothed following:
$$
\overline{\operatorname{score}}_{t} = \alpha \times \overline{\operatorname{score}}_{t-1} + (1 - \alpha) \times \operatorname{score}_{t}, \tag{3}
$$
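In code, this exponential smoothing can be sketched as follows (assuming a list of per-epoch scores):

```python
def smooth(scores, alpha=0.9):
    """Eq. 3: exponentially smoothed scores used for the plotted curves."""
    out, prev = [], scores[0]
    for s in scores:
        prev = alpha * prev + (1 - alpha) * s
        out.append(prev)
    return out
```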
with $\alpha = 0.9$ and $\overline{\operatorname{score}}$ denoting the smoothed score.
Overestimation computation. We used the following formula to compute the relative overestimation:
$$
\text{overestimation} = \frac{Q\text{-value} - R}{R} \tag{4}
$$
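A direct transcription of Eq. 4 (with `ret` standing for the actual return $R$):

```python
def overestimation(q_value, ret):
    """Eq. 4: relative overestimation of the Q-value w.r.t. the return R."""
    return (q_value - ret) / ret
```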
# RL NETWORK ARCHITECTURE
Shown below are the network architectures of the DQN, DDQN and Rainbow agents with rational plasticity (with rational activation functions at each layer) and of the regularised ones (with one joint-rational activation function shared across layers). For the other activation functions, the "Rat." blocks are replaced with Leaky ReLU, CReLU, SiLU, or PELU. For the d+SiLU networks, SiLU is used in the convolutional layers (i.e. the first two) and dSiLU in the fully connected ones (i.e. the last two).

Figure 11: left: The DQN agents' neural network equipped with Rational Activation Functions (Rat.). Any other network with classical activation functions (such as Leaky ReLU or SiLU) would be similar, with the corresponding activation function instead of the rational one. right: The agents' network using the regularised joint-rational version. The same activation is used across the layers: the parameters of the rational activation function (in orange) are shared. In both graphs, operations are placed in the grey boxes and parameters in the blue ones (or orange for the rationals').

adaptiverationalactivationstoboostdeepreinforcementlearning/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbba9b026c6612098a5b2e712978d470fb7674cb0cf345adb8062b3ce5d12217
+size 1655322

adaptiverationalactivationstoboostdeepreinforcementlearning/layout.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6969a450f52f01d5077dd7d34fe25bdba14e8ce1776e789aba955ef157539f4
+size 589110

addressingsignaldelayindeepreinforcementlearning/30c95151-f2de-4045-ab49-75e38047ad97_content_list.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e71ef44961d8a7d4ad14c41b3812c71cb7d62c34c11cb460a87633d216d28e56
+size 159938

addressingsignaldelayindeepreinforcementlearning/30c95151-f2de-4045-ab49-75e38047ad97_model.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c9ae28f331c8917eccb2521e509d45038c812c9f06e9920db672b94bdb824d9
+size 194579

addressingsignaldelayindeepreinforcementlearning/30c95151-f2de-4045-ab49-75e38047ad97_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:786771a91bc36e9f37e3fe6bc02c16f4065b9037d8ea06b4badbbb3ad91f7c7b
+size 6779974

addressingsignaldelayindeepreinforcementlearning/full.md ADDED
@@ -0,0 +1,665 @@
| 1 |
+
# ADDRESSING SIGNAL DELAY IN DEEP REINFORCEMENT LEARNING
Wei Wang $^{1}$ Dongqi Han $^{2}$ Xufang Luo $^{2}$ Dongsheng Li $^{2}$
<sup>1</sup>Western University, Canada <sup>2</sup>Microsoft Research Asia
waybaba2ww@gmail.com,{dongqihan, xufluo, dongsli}@microsoft.com
# ABSTRACT
Despite the notable advancements in deep reinforcement learning (DRL) in recent years, a prevalent issue that is often overlooked is the impact of signal delay. Signal delay occurs when there is a lag between an agent's perception of the environment and its corresponding actions. In this paper, we first formalize delayed-observation Markov decision processes (DOMDP) by extending the standard MDP framework to incorporate signal delays. Next, we elucidate the challenges posed by the presence of signal delay in DRL, showing that trivial DRL algorithms and generic methods for partially observable tasks suffer greatly from delays. Lastly, we propose effective strategies to overcome these challenges. Our methods achieve remarkable performance in continuous robotic control tasks with large delays, yielding results comparable to those in non-delayed cases. Overall, our work contributes to a deeper understanding of DRL in the presence of signal delays and introduces novel approaches to address the associated challenges.
# 1 INTRODUCTION
Deep reinforcement learning (DRL) and its applications have undergone rapid development in recent years (Sutton & Barto, 1998). The success of DRL has been witnessed not only in virtual tasks like video games (Vinyals et al., 2019) and simulated robotic environments (Haarnoja et al., 2018a), but also in many challenging real-world tasks such as controlling tokamaks (Degrave et al., 2022) and tuning language models with human feedback (Schulman et al., 2017; Brown et al., 2020).
However, a widely present problem has long been ignored in deep RL studies: the delay of signals, i.e., the agent may not immediately observe the current environmental state, or the agent's action may not immediately act on the environment. Signal delay exists widely in various practical applications. For example, in autonomous vehicle navigation (Jafaripournimchahi et al., 2022), delayed feedback can occur due to real-world constraints, such as network latency or sensor processing time. In finance (Fang et al., 2021), high-frequency trading algorithms may experience delays in receiving information about market conditions due to network congestion or data processing bottlenecks. In robotics, the communication latency between a robot's sensors, actuators, and control systems can lead to delayed responses (Abadia et al., 2021). Additionally, in medical applications, such as telemedicine or remote surgery (Meng et al., 2004), there can be a delay between receiving patient data and the actual execution of the required actions. Last but not least, even when the delay is short (e.g., 1 ms due to neural network inference), the environment may have already changed a lot, e.g., when controlling a tokamak (characteristic timescale of 0.1 ms) (Degrave et al., 2022). These delays have a significant impact on the effectiveness of deep RL-based solutions, necessitating urgent research to address this challenge.
On the other hand, signal delay is also a critical issue in biological systems. For instance, neural signals in humans take approximately $150\mathrm{ms}$ (Gerwig et al., 2005) to propagate from the brain to the muscles (efferent delay) and from sensors to the brain (afferent delay). This delay can be significant in motor control (Bastian, 2006), considering that Usain Bolt moves for more than 10 meters and a pianist plays dozens of notes every second. As DRL is well recognized as a biologically plausible

(a)
Figure 1: (a-b) Examples and illustration of delays in real-world scenarios. Inference: The process of inference incurs time costs (e.g., human brain, GPU). Execution: Time lags occur when signals travel to their point of execution (e.g., hand, robotic arm). Perception: Processing raw signals involves processing time (e.g., from eye to brain, sensor to processor). Transmission: Implementing actions often entails transmission delays to the intended environment (e.g., online gaming, remote control scenarios). (c) Even a few steps of delay significantly deteriorate the performance of SAC.

(b)

(c)
framework for modeling control and behavior (Botvinick et al., 2020), addressing signal delay in DRL could shed light on the corresponding neural mechanisms in animals and humans (Stein, 2009).
However, given the wide existence of signal delay and its importance, there are surprisingly few studies that try to address this problem in DRL. While there have been extensive studies on delayed feedback in control theory and the bandit problem, the only study, to our knowledge, that explicitly touches this problem in deep reinforcement learning is from Chen et al. (Chen et al., 2021). However, they make a strong assumption of knowing the reward function, which is often unknown in practice, and only discuss the case of a fixed delay. The problem of signal delay in DRL in more general cases urgently needs to be studied.
The current study aims to address the problem of deep RL with delay (DRLwD), particularly in continuous control tasks since most applications require continuous action space, such as robotics, autonomous driving, remote surgery, and modeling biological motor actions. The primary contributions of this work are summarized as follows. First, we investigate the impact of delay on performance and provide empirical evidence demonstrating its significant effect through comprehensive experiments. Next, we provide a mathematical formulation for the delay problem, encompassing both action and observation delays, and derive theoretical insights to devise an effective solution. Then, building on our suggested insights, we examine a range of ideas to mitigate or overcome the key challenges and provide an empirical evaluation of the effectiveness of each idea. Finally, we suggest simple and general approaches for actor-critic architectures that effectively address the impacts of signal delay in DRL. Overall, our work contributes to a deeper understanding of DRL in the presence of signal delays and presents a novel model architecture to overcome the associated challenges.
# 2 PROBLEM DEFINITION
# 2.1 BACKGROUND: PARTIALLY OBSERVABLE MDP
A POMDP is defined as a tuple $(S, A, \mathcal{P}_0, \mathcal{T}, X, \mathcal{O}, \gamma)$, where $S$ and $A$ are the state and action spaces, respectively. $\mathcal{P}_0$ specifies the initial state distribution such that $\mathcal{P}_0(s)$ is the probability of a state $s \in S$ being an initial state. $\mathcal{T}$ specifies the state transition probability such that $\mathcal{T}(s', r|s, a)$ is the probability of reaching a new state $s' \in S$ with an immediate reward $r \in \mathbb{R}$ after taking an action $a \in A$ at a state $s \in S$. $X$ denotes the observation space. $\mathcal{O}$ specifies the observation probability such that $\mathcal{O}(x|s)$ is the probability of an observation $x \in X$ at a state $s \in S$. $\gamma \in [0,1)$ is the discount factor.
# 2.2 DEFINING DELAYED-OBSERVATION MDP
First we consider a standard MDP without delay (referred to as the original MDP), in which the state is denoted by $s$. We extend the original MDP to incorporate observation delay (the number of delay steps can be non-fixed). We define a delayed-observation Markov decision process $(\mathrm{DOMDP})^1$, as a special case of POMDP, by considering its state $\sigma = (s^{(-T)}, s^{(-T + 1)}, \dots, s^{(-1)}, s)$, where $T$ is the maximum delay and $s$ is the state of the original MDP. Intuitively, $s^{(-t)}$ is the state of the original MDP from $t$ steps ago, and the superscript $(-t)$ indicates the relative timestep shift so as to maintain the Markovian property. The transition probability function of a DOMDP is then defined as $\mathcal{T}(\sigma', r|\sigma, a) = \mathcal{T}_0(s', r|s, a) \prod_{t=1}^{T} \mathbb{I}(s'^{(-t)} = s^{(-t+1)})$, where $\mathbb{I}$ denotes the indicator function and $\mathcal{T}_0(s', r|s, a)$ is the transition probability of the original MDP. The term $\prod_{t=1}^{T} \mathbb{I}(s'^{(-t)} = s^{(-t+1)})$ transfers $s^{(-t+1)}$ at the current step to $s^{(-t)}$ at the next step, therefore explaining $s^{(-t)}$ as the delayed state from $t$ steps ago, without the necessity of introducing an absolute time step as in a time-dependent MDP (Boyan & Littman, 2000).
Then we define the observation probability as $\mathcal{O}(\tilde{s}|\sigma) = \mathcal{O}(\tilde{s}|s^{(-T)}, s^{(-T+1)}, \ldots, s^{(-1)}, s) = \sum_{t=1}^{T} \mathcal{P}(t)\mathbb{I}(\tilde{s} = s^{(-t)})$, where $\tilde{s}$ is the observation (delayed state), and $\mathcal{P}(t)$ is the probability that the signal is delayed for $t$ steps. A simple case is $\mathcal{P}(t) = \mathbb{I}(t = \Delta T)$, which means the signal delay is fixed at $\Delta T$ steps, and thus $\tilde{s} = s^{(-\Delta T)}$. So far, we have defined a DOMDP by specifying the elements of a POMDP.

Figure 2: (a) Illustration of action and observation shifting in a DOMDP. (b) Examples of the different kinds of delays. Inference delay: the model's inference time cost. Observation delay: the time cost to transmit the observation to the agent. Action delay: the time to execute the action. (c) All delays are equivalent to a single delay with the same total duration.


The complexity and diversity of environments can lead to various kinds of delays, such as inference delay $\Delta T^I$, action delay $\Delta T^a$, and observation delay $\Delta T^s$, as shown in Fig. 2(b), largely adding to the intricacy of the problem. Thankfully, all these types of delays are equivalent: only their sum, denoted by $\Delta T$, impacts decision-making. This realization paves the way for a streamlined approach to modeling the problem. We proceed to establish this through the following theorem (Katsikopoulos & Engelbrecht, 2003):
Theorem 2.1 (Delay Equivalence). For an agent employing policy $\pi(a|\cdot)$ at time step $t$, the impact of the state delay $\Delta T^s$, $\Delta T^I$ and $\Delta T^a$ on the observation transition $\tilde{\mathcal{P}}(\tilde{s}_{t+1}|\tilde{s}_t)$ is equivalent. In other words, as long as $\Delta T$ is the same, the agent perceives different combinations of $\Delta T^s$, $\Delta T^I$ and $\Delta T^a$ values as identical. This conclusion holds for both fixed and unfixed delays. (Proof in Appendix.)
Consequently, we focus on the DOMDP on the right side of Fig. 2(b) for the sake of notational simplicity and comprehension. Specifically, in Fig. 2, the system is modeled by an ideal agent with zero inference time and zero execution time. The only delay between the state of the environment and the decision-making input of the agent is $\Delta T = \Delta T^{s} + \Delta T^{a} + \Delta T^{I}$. Therefore, we consider observation delay in this work without loss of generality.
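To make the setting concrete, a fixed observation delay can be sketched as an environment wrapper that buffers states and emits the one from $\Delta T$ steps ago; the gymnasium API and the padding of early steps with the initial state are assumptions of this illustration, not part of the formal definition.

```python
from collections import deque
import gymnasium as gym

class DelayedObservation(gym.Wrapper):
    """Minimal sketch: the agent observes the state from `delay` steps ago."""
    def __init__(self, env, delay):
        super().__init__(env)
        self.delay = delay
        self.buf = deque(maxlen=delay + 1)

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        self.buf.clear()
        for _ in range(self.delay + 1):  # pad so early observations repeat s_0
            self.buf.append(obs)
        return self.buf[0], info

    def step(self, action):
        obs, reward, terminated, truncated, info = self.env.step(action)
        self.buf.append(obs)  # buf[0] is now the observation from `delay` steps ago
        return self.buf[0], reward, terminated, truncated, info
```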
# 3 CHALLENGE OF DRL WITH DELAYED SIGNALS
Standard DRL and general POMDP algorithms exhibit catastrophic failure under signal delay. We now empirically examine the performance of existing RL algorithms applied straightforwardly to environments subjected to various delays on four MuJoCo environments. The algorithms include deep deterministic policy gradient (DDPG) (Lillicrap et al., 2015), twin-delayed DDPG (TD3) (Fujimoto et al., 2018), soft actor-critic (SAC) (Haarnoja et al., 2018b) and an RNN-based approach for general POMDP tasks (RNN Strong) (Ni et al., 2022). Fig. 3 shows the normalized results of the experiments, in which the reported scores are proportionate to the maximum attainable score of SAC without delay for each environment. It reveals that even a modest delay of one environment step substantially undermines the performance of all these algorithms. Delays surpassing 4 steps lead to catastrophic failure. These results underscore the considerable adverse impact delays can have on the efficacy of DRL algorithms when they are directly applied. In what follows, we try to figure out where the difficulties of DRLwD stem from.

(a) # Delay = 1

(b) # Delay = 4

(c) # Delay = 8
Figure 3: Task-averaged performance in MuJoCo environments (Ant-v4, HalfCheetah-v4, Hopper-v4, Walker2d-v4) under various delay steps for state-of-the-art algorithms. SAC (No delay) acts as the benchmark for optimal performance. The red dashed line and shaded area represent the mean and standard deviation of our method's best-performing variants, serving as a benchmark.

(d) # Delay = 12
Misalignment between observed and true state Fig. 4 illustrates that when observation signal delays occur, the agent's actual state can differ from its observed state, leading to varying optimal actions. Addressing this misalignment between the current observation (delayed signal) and true state (non-delayed) is a central challenge in DRLwD for optimal decisions. One straightforward solution is to estimate the true state based on available information, such as the delayed signal and previous actions. This would transform the problem to a standard MDP task using the estimated true state. However, accurate estimation can be difficult due to several factors, including complex state transitions, stochastic environmental state transitions, and data distribution shifts (Pan & Yang, 2010) during the process of online RL as the agent improves its
policy (Quinonero-Candela et al., 2008). In light of these challenges, alternative approaches are needed to address the problem. The following section will discuss various methods, including our proposed model, and empirically evaluate their effectiveness in handling the delay issue.

Figure 4: Varying optimal actions between actual state and observed state.
# 4 DEVELOPING AN ALGORITHMIC FRAMEWORK FOR DOMDP
Building on the definition and discussion presented earlier, our goal is to develop an algorithm that effectively addresses the DOMDP problem. However, our objective extends beyond merely creating a standalone algorithm specifically for DOMDP. Instead, we aim to explore a versatile framework and components that can be integrated with current or future algorithms to handle DOMDP problems effectively. This approach will lay a foundation that facilitates the seamless integration of future
developments in reinforcement learning with existing strategies. Considering that current state-of-the-art actor-critic methods have demonstrated success in continuous control tasks (Lillicrap et al., 2015; Haarnoja et al., 2018a; Fujimoto et al., 2018), we ground our work on the actor-critic architecture.
The rest of this section is organized as follows: We first investigate the input design of the critic (Sec. 4.1) and the actor (Sec. 4.2). Subsequently, we explore other potential training techniques that show promise in improving performance under DOMDP scenarios (Sec. 4.3).

(a)


(b)


(c)

Figure 5: Diagrams of the Proposed Methods: (a) Delay-Reconciled Training for Critic: Utilizes only the online side during the deployment stage; the critic uses non-delayed information after reconciliation in the offline stage. (b) State Augmentation for Actor: Considers three methods for the input $\bar{s}_t$ with historical information - MLP, RNN, and Transformer. For MLP, historical actions $a_{t - \Delta T:t - 1}$ are concatenated with the state dimension. In RNN, the input consists of the current state and last-step action. In the case of the Transformer, the input includes state and action information $\{s_i,a_{i - 1}\}_{t - \Delta T:t - 1}$ spanning $\Delta T$ steps. (c) Complementary Techniques for DOMDP Resolution: Employs a prediction/encoding auxiliary loss to aid representation learning. An encoder first encodes $\bar{s}_t$ into the hidden feature $z_{t}$, and then a decoder decodes it to $\hat{s}_t$. Prediction refers to using the predicted result $\hat{s}_t$ as the input of the actor, while the Encoding method utilizes the features $z_{t}$ in the hidden space. For both methods, two variants are considered; Detach indicates stopping gradient backpropagation (denoted by $\dagger$).
# 4.1 DELAY-RECONCILED TRAINING FOR CRITIC
Estimating value functions is the most basic requirement for actor-critic methods. With time-shifted observations, a question naturally arises: how can we accurately estimate the state value, i.e., train the critic, under such circumstances? Contrary to standard POMDP problems, a crucial insight for DOMDPs is that the full observation and reward are available during offline training once the delay is reconciled following real-time inference. In addition, as depicted in Fig. 5a, the forward pass of the critic is not required during the inference stage when interacting with the environment.
These facts motivate us to design a post-processing recovery pipeline that first recovers the historical information without delay, and then uses it for critic training. For instance, in delay-affected scenarios such as online gaming or trading, we can time-calibrate the historical data to recover the trajectory without delay. While this information is not available during inference, it can still be employed for critic training, thanks to the actor-critic structure. Similar techniques can be found in previous studies.
Previous studies, such as those by Vinyals et al. (2017); Baisero et al. (2022); Foerster et al. (2018), have demonstrated the efficacy of an asymmetric-observation actor-critic paradigm. However, research also indicates that an asymmetric input design may lead to a non-synchronous problem and gradient bias, as discussed by Baisero & Amato (2021). To further explore this issue, we include a comparison with a symmetric design in our study. In this alternative approach, the critic receives the same input as the actor, named with "Symmetric" as the prefix in the following context.
# 4.2 STATE AUGMENTATION FOR ACTOR
In the preceding section, we introduced a historical recovery pipeline for critic training. A similar approach, however, is not applicable to the actor, as the oracle $s_t$ is unavailable during the inference stage. This section explores potential ways to augment the actor's input to facilitate training.
Reviewing established POMDP algorithms, a common strategy, given the actor's inability to access the oracle state $s_t$ during inference, is to provide the actor with supplementary input to aid in the recovery of $s_t$. These inputs typically consist of historical environmental observations, such as opponents' behavior. However, it is evident that the historical states $\tilde{s}_{<t}$ are not informative in DOMDPs, as $I(s_t, \tilde{s}_{<t} | \tilde{s}_t) = 0$, assuming a Markovian environment, where $I(\cdot, \cdot)$ denotes mutual information.
So, aside from the historical states, what other information could aid in recovering the oracle state $s_t$? Intuitively, in a DOMDP, as the oracle state $s_t$ is beyond $\tilde{s}_t$, the most significant transition influence is likely the actions taken between $\tilde{s}_t$ and $s_t$, denoted $a_{t - \Delta T:t}$. The following theorem (Katsikopoulos & Engelbrecht, 2003) proves the necessity of incorporating $a_{t - \Delta T:t}$ into the agent's input.
Theorem 4.1 (Markovian Property). Integrating historical actions $a_{<t}$ into the observation transforms it into an MDP with state transition probability $\bar{\mathcal{P}}(\bar{s}_{t+1} | \bar{s}_t, a_t)$ , where $\bar{s}_t = (\tilde{s}_t, a_{t-\Delta T:t-1})$ .
The proof is deferred to Appendix D.2. The theoretical analysis suggests that in DOMDPs, the oracle state $s_{t_0}$ is independent of $\tilde{s}_{t < t_0}$ given $\tilde{s}_{t_0}$, i.e., $s_{t_0} \perp \tilde{s}_{t < t_0} | \tilde{s}_{t_0}$. Additionally, it discloses a strong association between the oracle state $s_t$ and the historical actions $a_{t - \Delta T:t}$. In conclusion, incorporating historical actions into the observation allows us to convert a DOMDP into an MDP, which is a fundamental requirement for the convergence of many methods.
Next, we investigate two strategies to incorporate historical information into the actor, as in Fig. 5b: 1) MLP Encoder: concatenating the historical action data $a_{t - \Delta T:t}$ with $\tilde{s}_t$; 2) RNN Encoder: adding only the last action $a_{t-1}$, which is more efficient for handling large action spaces or long histories.
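For the MLP variant, the augmented observation of Theorem 4.1 amounts to a simple concatenation; the following sketch assumes flat (vector) observations and actions.

```python
import numpy as np

def augment(delayed_obs, action_history):
    # \bar{s}_t = (delayed observation, last Delta-T actions), flattened.
    return np.concatenate([np.asarray(delayed_obs).ravel(),
                           np.asarray(action_history).ravel()])
```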
# 4.3 INVESTIGATION OF COMPLEMENTARY TECHNIQUES FOR DOMDP SOLUTION
Previous research (Igl et al., 2018; Subramanian et al., 2022; Lambrechts et al., 2023) shows that auxiliary losses can improve RL agent performance. Expanding on this and on the Delay-Reconciled Training for the Critic, our work introduces techniques beyond input design to enhance our framework. We aim to enable the actor to "imagine" the true state $s_t$ from the delayed observations $\tilde{s}_t$ and actions $a_{t - \Delta T:t}$. Currently, the actor's learning relies on the critic's value estimations. To improve this, we propose two strategies using oracle observations for additional supervision.
Prediction Referring to the left side of Fig. 5c, a prediction network is trained, using $\bar{s}_t$ as input and $\hat{s}_t$ as output. The loss between the predicted state $\hat{s}_t$ and the oracle state $s_t$ is minimized. The prediction results are then utilized as actor input. Two variants were tested: one detaches (indicated with $\dagger$) the output of the observation prediction network before inputting it to the policy network $\pi(\cdot)$, and the other does not. Detaching the variables can stabilize the interaction between the prediction and policy networks, while maintaining the connection potentially enhances the supervised learning.
Encoding As depicted on the right of Fig. 5c, a prediction network is trained to generate hidden features $z_{t}$. These features serve as the input to the policy network $\pi(\cdot)$. This approach may be particularly effective when the original observation is hard to predict accurately, or when information is sparse.
# 5 EXPERIMENTAL RESULTS
We performed our experimental evaluations across MuJoCo environments (Todorov et al., 2012) with signal delay. We compare to popular RL algorithms for continuous control, including DDPG (Lillicrap et al., 2015), TD3 (Fujimoto et al., 2018) and SAC (Haarnoja et al., 2018b); DRL algorithms for POMDPs, including an RNN-based one (Ni et al., 2022) and a belief-state-based one (VRM) (Han et al., 2020); as well as DATS (Chen et al., 2021), a model-based algorithm for DRLwD (Tab. 1). We examine four distinct environment settings: (1) Fixed Delay, (2) Unfixed Delay, (3) Probabilistic State Transition, (4) Large State Space. (1-3) include Ant-v4, Walker2d-v4, Hopper-v4 and HalfCheetah-v4.
Implementation Details are provided in the appendix, covering the environment setup (Sec. E.1), implementation of baselines from other research (Sec. E.2), our network architecture (Sec. E.3), hyperparameter selection (Sec. E.4), implementation of delayed environments (Sec. E.5), our code framework (Sec. E.6), and additional details (Sec. E.7).
# 5.1 EMPIRICAL EVALUATION OF DESIGNS
Delay-Reconciled Training for Critic The learning curve associated with the traditional method exhibits a plateau (Fig. 3), indicating that the learning process is not effectively progressing. However, when we apply the new design for the critic, the learning curves for both fixed (Fig. 6c) and unfixed delay scenarios (Fig. 6e) display a more dynamic and promising trend. This indicates that our Delay-Reconciled Training design contributes to a more progressive learning process. Furthermore, as demonstrated in Fig. 6a,b, the performance of Delay-Reconciled Training for the Critic consistently surpasses that of the Vanilla SAC when delay is introduced, and this performance gap widens as the delay increases. This result underscores the robustness of our design for the critic.
State Augmentation for Actor The aforementioned results demonstrate the effectiveness of our critic input design. Building upon this, we further apply state augmentation for the actor, on top of Delay-Reconciled Training for Critic. As shown in Fig. 6c,e, the learning curve of the model trained solely with Delay-Reconciled Training for Critic plateaus when the delay exceeds 8 steps. However, after incorporating state augmentation for the actor, the learning curve regains a positive trend, as depicted in Fig. 6d,f. Moreover, the performance of state augmentation for the actor consistently exceeds that of the model trained solely with Delay-Reconciled Training for Critic (Fig. 6a,b). As shown in Tab. 1, the final average improvement is $1.2\% \to 50.8\% \to 75.9\%$ on fixed delay and $8.4\% \to 48.1\% \to 77.0\%$ on unfixed delay. These results suggest that both techniques contribute consistent performance gains and can be combined for further enhancement.
Complementary Techniques. As evident from Tab. 1, the most effective performance for each delay predominantly arises from algorithms employing Prediction and Encoding techniques. Specifically, when the delay is fixed and the delay step exceeds 1, the top-performing algorithms consistently involve Encoding $\dagger$ and Prediction $\dagger$ . On average, Encoding $\dagger$ and Prediction $\dagger$ enhance the performance of State Augmentation for Actor from $75.9\%$ to $84.5\%$ and $83.6\%$ , respectively. However, the efficacy of explicit supervision significantly diminishes when the delay is unfixed. The average performance of Encoding $\dagger$ ( $77.9\%$ ) approximates that of State Augmentation ( $77.0\%$ ). Notably, the performance of Prediction $\dagger$ ( $72.5\%$ ) even drops by $4.5\%$ compared to State Augmentation. This observation underscores that the explicit introduction of Prediction doesn’t always guarantee performance improvement. Whether the delay is fixed substantially influences the effectiveness of techniques introducing additional prediction supervision.




Figure 6: Consistent performance improvement of the critic and actor input designs. (a) Fixed delay (b) Unfixed delay (c) Delay-Reconciled Training for Critic with fixed delay (d) Actor State Augmentation-MLP with fixed delay (e) Delay-Reconciled Training for Critic with unfixed delay (f) Actor State Augmentation-MLP with unfixed delay. Rewards are averaged over four MuJoCo tasks.


Table 1: Performance $(\%)$ of algorithms in fixed and unfixed delay environments. The shaded values represent results from unfixed delay environments. All values are normalized based on the environment-specific performance of Vanilla SAC with no delay*. Data are presented as mean $\pm$ standard error of the mean (S.E.M). The best performing methods, including those within the range of S.E.M of the best, are highlighted in bold. Here, $s_t$ denotes the non-delayed state, $\tilde{s}_t = s_{t - \Delta T}$ represents the delayed state, and $\bar{s}_t = (\tilde{s}_t,a_{t - \Delta T:t - 1})$ signifies the state with historical actions. $\hat{s}_t$ is the predicted state, while $z_{t}$ is the hidden feature of the prediction network. In. and Net. denote the input and network of the critic/actor, respectively.
<table><tr><td rowspan="2">Method</td><td colspan="2">Critic</td><td colspan="2">Actor</td><td colspan="13"># Delayed Time Steps</td></tr><tr><td>In.</td><td>Net.</td><td>In.</td><td>Net.</td><td>0</td><td colspan="2">1</td><td colspan="2">2</td><td colspan="2">4</td><td colspan="2">8</td><td colspan="2">12</td><td colspan="2">Avg. #≥4</td></tr><tr><td colspan="18">Existing Methods</td></tr><tr><td>Vanilla DDPG (Lillicrap et al., 2015)</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>49.6±12.2</td><td>13.3±8.2</td><td>15.7±10.1</td><td>5.9±6.0</td><td>2.3±12.0</td><td>-1.3±2.2</td><td>10.5±6.3</td><td>-2.2±1.4</td><td>-1.2±1.9</td><td>-1.6±1.0</td><td>-1.4±1.3</td><td>-1.7±1.5</td><td>2.6±3.2</td></tr><tr><td>Vanilla TD3 (Fujimoto et al., 2018)</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>96.3±10.5</td><td>22.9±6.6</td><td>59.4±9.6</td><td>10.5±3.3</td><td>36.6±8.7</td><td>7.1±3.3</td><td>16.7±4.6</td><td>6.6±3.6</td><td>8.1±3.2</td><td>6.7±3.7</td><td>7.7±3.5</td><td>6.8±3.5</td><td>10.8±3.8</td></tr><tr><td>Vanilla SAC (Haarnoja et al., 2018b)</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>100.0*±9.4</td><td>40.3±12.3</td><td>62.5±10.6</td><td>20.5±8.4</td><td>44.6±15.2</td><td>5.3±3.4</td><td>18.1±7.5</td><td>-1.0±2.3</td><td>5.0±3.3</td><td>-0.7±1.1</td><td>2.2±2.0</td><td>1.2±2.3</td><td>8.4±4.3</td></tr><tr><td>RNN Strong Baseline (Ni et al., 2022)</td><td>\( \tilde{s}_{t} \)</td><td>RNN</td><td>\( \tilde{s}_{t} \)</td><td>RNN</td><td>83.7±12.1</td><td>58.2±9.1</td><td>62.9±9.5</td><td>43.3±7.1</td><td>41.7±8.9</td><td>34.5±8.2</td><td>32.4±6.7</td><td>24.7±4.1</td><td>24.5±5.1</td><td>16.1±6.1</td><td>14.2±1.1</td><td>23.6±4.5</td><td>23.7±4.3</td></tr><tr><td>VRM (Han et al., 2020)</td><td>\( \tilde{s}_{t} \)</td><td>RNN</td><td>\( \tilde{s}_{t} \)</td><td>RNN</td><td>91.1±16.7</td><td>64.8±12.3</td><td>71.5±20.7</td><td>54.9±17.6</td><td>62.7±14.9</td><td>32.2±12.0</td><td>41.4±16.7</td><td>21.4±5.1</td><td>29.7±10.0</td><td>14.73±2.2</td><td>16.4±2.0</td><td>22.7±6.4</td><td>29.1±9.5</td></tr><tr><td>DATS (Chen et al., 2021)</td><td>\( \tilde{s}_{t} \)</td><td>RNN</td><td>\( \tilde{s}_{t} \)</td><td>RNN</td><td>98.7±13.4</td><td>73.9±11.3</td><td>70.9±13.2</td><td>60.7±12.7</td><td>55.0±9.7</td><td>50.3±8.2</td><td>34.7±8.1</td><td>22.9±4.7</td><td>28.4±4.2</td><td>19.1±5.1</td><td>14.3±1.1</td><td>30.8±6.0</td><td>25.8±4.5</td></tr><tr><td colspan="18">Critic Input Design</td></tr><tr><td>Delay-Reconciled Training</td><td>\( s_t \)</td><td>MLP</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>99.2±8.0</td><td>92.9±10.0</td><td>94.9±8.5</td><td>84.5±10.4</td><td>85.0±10.8</td><td>73.5±11.1</td><td>65.7±11.7</td><td>46.7±9.7</td><td>46.9±10.5</td><td>32.3±9.1</td><td>31.8±8.8</td><td>50.8±9.9</td><td>48.1±10.3</td></tr><tr><td colspan="18">Actor Input Design</td></tr><tr><td>State Augmentation - MLP</td><td>\( s_t \)</td><td>MLP</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>100.8±11.2</td><td>92.8±7.1</td><td>96.6±6.6</td><td>93.4±8.1</td><td>102.1±13.1</td><td>78.0±9.2</td><td>81.6±9.6</td><td>85.0±9.9</td><td>84.0±9.9</td><td>64.7±8.6</td><td>65.5±8.4</td><td>75.9±9.2</td><td>77.0±9.3</td></tr><tr><td>State Augmentation - RNN</td><td>\( s_t \)</td><td>MLP</td><td>\( \tilde{s}_{t} 
\)</td><td>RNN</td><td>84.4±11.5</td><td>71.0±10.4</td><td>72.3±12.1</td><td>66.6±13.0</td><td>60.4±11.8</td><td>37.5±10.2</td><td>43.7±10.9</td><td>24.6±7.2</td><td>22.4±4.8</td><td>21.3±5.1</td><td>15.0±3.4</td><td>27.8±7.5</td><td>27.0±6.3</td></tr><tr><td>State Augmentation - Transformer</td><td>\( s_t \)</td><td>MLP</td><td>\( \tilde{s}_{t} \)</td><td>Tran.</td><td>76.1±8.1</td><td>72.3±12.2</td><td>76.9±12.3</td><td>58.1±10.5</td><td>57.3±11.3</td><td>57.2±6.2</td><td>40.8±7.6</td><td>32.0±6.3</td><td>19.8±5.2</td><td>16.3±6.6</td><td>17.4±5.7</td><td>35.1±6.4</td><td>26.0±6.2</td></tr><tr><td colspan="18">Exploring Extended Techniques</td></tr><tr><td>Prediction†</td><td>\( s_t \)</td><td>MLP</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>102.1±9.7</td><td>101.5±13.9</td><td>96.1±12.2</td><td>100.5±14.4</td><td>99.0±12.5</td><td>92.2±16.2</td><td>88.6±14.4</td><td>85.6±10.8</td><td>71.0±12.8</td><td>73.0±10.3</td><td>58.1±14.1</td><td>83.6±12.5</td><td>72.5±13.8</td></tr><tr><td>Encoding†</td><td>\( s_t \)</td><td>MLP</td><td>\( z_t \)</td><td>MLP</td><td>97.6±12.1</td><td>101.4±9.0</td><td>113.0±9.6</td><td>103.2±10.0</td><td>92.0±9.5</td><td>90.4±9.5</td><td>86.6±8.2</td><td>89.9±12.4</td><td>77.9±10.8</td><td>73.3±9.9</td><td>69.2±9.7</td><td>84.5±10.6</td><td>77.9±9.6</td></tr><tr><td colspan="18">Others</td></tr><tr><td>Symmetric - MLP</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>\( \tilde{s}_{t} \)</td><td>MLP</td><td>106.3±9.4</td><td>75.1±9.1</td><td>72.5±8.9</td><td>63.7±9.7</td><td>61.2±9.0</td><td>51.4±11.7</td><td>52.0±9.6</td><td>37.5±13.2</td><td>31.0±7.0</td><td>27.0±12.0</td><td>28.3±5.0</td><td>38.6±12.3</td><td>37.1±7.2</td></tr></table>
# 5.2 PERFORMANCE ACROSS DIVERSE ENVIRONMENTS
Performance on Basic Environments. As shown in Tab. 1, DDPG, TD3, and SAC experience a significant performance drop of over $79.6\%$ when the delay exceeds four time steps. Even a single-step delay can cause DDPG's performance to plummet from $49.6\%$ to $13.3\%$ (fixed delay) and $15.7\%$ (unfixed delay). TD3 and SAC exhibit a smaller yet still substantial performance reduction, greater than $29.6\%$ . While the RNN Strong Baseline (Ni et al., 2022) and VRM (Han et al., 2020) can somewhat function in POMDPs and enhance SAC's performance, the improvement is relatively mild. Our simple Delay-Reconciled Training for Critic considerably boosts SAC's performance, consistently outperforming the RNN Strong Baseline. The actor design also contributes a consistent performance improvement. For the complementary techniques, we observe that the detached versions always outperform their non-detached counterparts; due to space constraints, we defer these results to Tab. 4 in the Appendix. For the symmetric design, in which both actor and critic use $\tilde{s}_t$ as input, the results reveal that it (with an average performance of $37.1\%$ ) significantly underperforms the asymmetric design with the delay-reconciled critic. Additional investigations into the possible reasons for the baseline failures are provided, covering issues related to representation learning (Sec. F.1) and redundant historical information (Sec. F.2). The learning curves can be found in Sec. F.7.
Fixed vs. Unfixed Delay As shown in Tab. 1, the stability of the delay most strongly influences prediction- and encoding-based methods. Under fixed delay, prediction-based methods maintain $79\%$ performance even with a 12-step delay, outperforming the second-best Actor State Augmentation-MLP at $61\%$ . However, under unfixed delay, the performance of prediction drops to $59\%$ , falling below that of Actor State Augmentation-MLP $(79\%)$ . This implies that explicitly introducing prediction supervision can harm performance, depending on whether the delay is fixed. This aligns with the intuition that future states become less predictable when the delay step varies. Additional results can be found in Sec. F.3 in the appendix.
Performance under Large Observation Spaces and Probabilistic Environments. The size of the observation space affects the performance of algorithms that explicitly introduce prediction supervision. As depicted in Fig. 7a-d and Fig. 7f, the performance of all such algorithms drops significantly as the number of delay steps increases. Meanwhile, the performance of Actor State Augmentation-MLP is less severely affected (Fig. 7e), experiencing only around a $20\%$ performance decrease when the number of delay steps reaches 12. Interestingly, when the prediction network is detached (Fig. 7b,d), the performance deteriorates even more drastically. This could be attributed to the difficulty of predicting the true observations, leading the prediction network to generate less useful information for the policy network. Additional insights and analyses are provided in Sec. F.4 of the appendix. Probabilistic environments can also contribute to the under-performance of prediction methods, with more detailed findings available in Sec. F.5 and Sec. F.6.




Figure 7: Performance when observation space is large. (a) Prediction (b) Prediction† (c) Encoding (d) Encoding† (e) Actor State Augmentation-MLP (f) Performance summary


# 5.3 TAKE-AWAY MESSAGE
We now summarize the main findings of our experiments: (1) The time recovery process presents a consistent and impactful enhancement to the performance of the Actor-Critic framework in DOMDPs (Tab. 1). (2) Augmenting the observation to $\bar{s}_t$ also contributes consistently and positively to overall performance (Tab. 1). (3) Introducing a detached prediction network can yield promising performance when the environment is simple (fixed-delay portion of Tab. 1). (4) Explicit prediction or model-based methods can lead to performance decreases (Tab. 1, Fig. 7) in scenarios where the environment is highly complex, with large observation spaces (Fig. 7), or difficult to model, as in probabilistic settings.
# 6 CONCLUSION
This work aims to address signal delay, a commonly overlooked issue in DRL (see Appendix B for a discussion of related work). We formulate DRLwD and propose effective approaches to alleviate the challenge of misalignment between observed and true states. Our research has certain limitations due to its focus on simulated robotic control environments; dealing with signal delay in real-world applications could involve greater uncertainty and additional challenges. Future work should explore real-world scenarios and the transition from simulated to real environments (James et al., 2017; Kadian et al., 2020); see Sec. C in the Appendix for further discussion.
# ACKNOWLEDGEMENTS
This work was supported by Microsoft Research. The authors thank Yifei Shen for insightful contributions to the discussions that greatly enhanced this research.
# REFERENCES
Ignacio Abadía, Francisco Naveros, Eduardo Ros, Richard R Carrillo, and Niceto R Luque. A cerebellar-based solution to the nondeterministic time delay problem in robotic control. Science Robotics, 6(58):eabf2756, 2021.
Andrea Baisero and Christopher Amato. Unbiased asymmetric reinforcement learning under partial observability. arXiv preprint arXiv:2105.11674, 2021.
Andrea Baisero, Brett Daley, and Christopher Amato. Asymmetric dqn for partially observable reinforcement learning. In Uncertainty in Artificial Intelligence, pp. 107-117. PMLR, 2022.
James L Bander and CC White. Markov decision processes with noise-corrupted and delayed state observations. Journal of the Operational Research Society, 50:660-668, 1999.
Amy J Bastian. Learning to predict the future: the cerebellum adapts feedforward movement control. Current opinion in neurobiology, 16(6):645-649, 2006.
Jose Blanchet, Renyuan Xu, and Zhengyuan Zhou. Delay-adaptive learning in generalized linear contextual bandits. Mathematics of Operations Research, 2023.
Matthew Botvinick, Jane X Wang, Will Dabney, Kevin J Miller, and Zeb Kurth-Nelson. Deep reinforcement learning and its neuroscientific implications. Neuron, 107(4):603-616, 2020.
Justin Boyan and Michael Littman. Exact solutions to time-dependent mdps. Advances in Neural Information Processing Systems, 13, 2000.
Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym. arXiv preprint arXiv:1606.01540, 2016.
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.
Baiming Chen, Mengdi Xu, Liang Li, and Ding Zhao. Delay-aware model-based reinforcement learning for continuous control. Neurocomputing, 450:119-128, 2021.
Junyoung Chung, Kyle Kastner, Laurent Dinh, Kratarth Goel, Aaron C Courville, and Yoshua Bengio. A recurrent latent variable model for sequential data. In Advances in Neural Information Processing Systems, pp. 2980-2988, 2015.
Jonas Degrave, Federico Felici, Jonas Buchli, Michael Neunert, Brendan Tracey, Francesco Carpanese, Timo Ewalds, Roland Hafner, Abbas Abdolmaleki, Diego de Las Casas, et al. Magnetic control of tokamak plasmas through deep reinforcement learning. Nature, 602(7897):414-419, 2022.
Yuchen Fang, Kan Ren, Weiqing Liu, Dong Zhou, Weinan Zhang, Jiang Bian, Yong Yu, and Tie-Yan Liu. Universal trading for order execution with oracle policy distillation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 107-115, 2021.
Jakob Foerster, Gregory Farquhar, Triantafyllos Afouras, Nantas Nardelli, and Shimon Whiteson. Counterfactual multi-agent policy gradients. In Proceedings of the AAAI conference on artificial intelligence, volume 32, 2018.
Scott Fujimoto, Herke van Hoof, and David Meger. Addressing function approximation error in actor-critic methods. In International Conference on Machine Learning, pp. 1587-1596. PMLR, 2018.
Marcus Gerwig, Karim Hajjar, Albena Dimitrova, Matthias Maschke, Florian P Kolb, Markus Frings, Alfred F Thilmann, Michael Forsting, Hans Christoph Diener, and Dagmar Timmann. Timing of conditioned eyeblink responses is impaired in cerebellar patients. Journal of Neuroscience, 25(15):3919-3931, 2005.
Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In International Conference on Machine Learning, pp. 1856-1865, 2018a.
Tuomas Haarnoja, Aurick Zhou, Kristian Hartikainen, George Tucker, Sehoon Ha, Jie Tan, Vikash Kumar, Henry Zhu, Abhishek Gupta, Pieter Abbeel, et al. Soft actor-critic algorithms and applications. arXiv preprint arXiv:1812.05905, 2018b.
Dongqi Han, Kenji Doya, and Jun Tani. Variational recurrent models for solving partially observable control tasks. In International Conference on Learning Representations, 2020.
Maximilian Igl, Luisa Zintgraf, Tuan Anh Le, Frank Wood, and Shimon Whiteson. Deep variational reinforcement learning for pomdps. In International Conference on Machine Learning, pp. 2117-2126. PMLR, 2018.
Max Jaderberg, Wojciech M Czarnecki, Iain Dunning, Luke Marris, Guy Lever, Antonio Garcia Castaneda, Charles Beattie, Neil C Rabinowitz, Ari S Morcos, Avraham Ruderman, et al. Human-level performance in 3D multiplayer games with population-based reinforcement learning. Science, 364(6443):859-865, 2019.
Ammar Jafaripournimchahi, Yingfeng Cai, Hai Wang, Lu Sun, and Biao Yang. Stability analysis of delayed-feedback control effect in the continuum traffic flow of autonomous vehicles without v2i communication. Physica A: Statistical Mechanics and its Applications, 605:127975, 2022.
Stephen James, Andrew J Davison, and Edward Johns. Transferring end-to-end visuomotor control from simulation to real world for a multi-stage task. In Conference on Robot Learning, pp. 334-343. PMLR, 2017.
Abhishek Kadian, Joanne Truong, Aaron Gokaslan, Alexander Clegg, Erik Wijmans, Stefan Lee, Manolis Savva, Sonia Chernova, and Dhruv Batra. Sim2real predictivity: Does evaluation in simulation predict real-world performance? IEEE Robotics and Automation Letters, 5(4):6670-6677, 2020.
Konstantinos V Katsikopoulos and Sascha E Engelbrecht. Markov decision processes with delays and asynchronous cost collection. IEEE transactions on automatic control, 48(4):568-574, 2003.
Jangwon Kim, Hangyeol Kim, Jiwook Kang, Jongchan Baek, and Soohee Han. Belief projection-based reinforcement learning for environments with delayed feedback. Advances in Neural Information Processing Systems, 36, 2023.
Gaspard Lambrechts, Adrien Bolland, and Damien Ernst. Informed pomdp: Leveraging additional information in model-based rl. arXiv preprint arXiv:2306.11488, 2023.
Bingcong Li, Tianyi Chen, and Georgios B Giannakis. Bandit online learning with unknown delays. In The 22nd International Conference on Artificial Intelligence and Statistics, pp. 993-1002. PMLR, 2019.
Timothy P Lillicrap, Jonathan J Hunt, Alexander Pritzel, Nicolas Heess, Tom Erez, Yuval Tassa, David Silver, and Daan Wierstra. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015.
Cai Meng, Tianmiao Wang, Wusheng Chou, Sheng Luan, Yuru Zhang, and Zengmin Tian. Remote surgery case: robot-assisted teleneurosurgery. In IEEE International Conference on Robotics and Automation, 2004. Proceedings. ICRA '04. 2004, volume 1, pp. 819-823. IEEE, 2004.
Tianwei Ni, Benjamin Eysenbach, and Ruslan Salakhutdinov. Recurrent model-free RL can be a strong baseline for many POMDPs. In International Conference on Machine Learning (ICML), pp. 16691-16723. PMLR, 2022.
Sinno Jialin Pan and Qiang Yang. A survey on transfer learning. IEEE Transactions on knowledge and data engineering, 22(10):1345-1359, 2010.
Christos H Papadimitriou and John N Tsitsiklis. The complexity of Markov decision processes. Mathematics of Operations Research, 12(3):441-450, 1987.
Joaquin Quinonero-Candela, Masashi Sugiyama, Anton Schwaighofer, and Neil D Lawrence. Dataset shift in machine learning. Mit Press, 2008.
J Schmidhuber. Making the world differentiable: On using fully recurrent self-supervised neural networks for dynamic reinforcement learning and planning in non-stationary environments. Institut für Informatik, Technische Universität München, Technical Report FK1-126, 90, 1990.
John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
John Stein. Cerebellar forward models to control movement. The Journal of physiology, 587(Pt 2): 299, 2009.
Jayakumar Subramanian, Amit Sinha, Raihan Seraj, and Aditya Mahajan. Approximate information state for approximate planning and reinforcement learning in partially observed systems. The Journal of Machine Learning Research, 23(1):483-565, 2022.
Richard S Sutton and Andrew G Barto. Reinforcement learning: An introduction, volume 1. MIT press Cambridge, 1998.
Emanuel Todorov, Tom Erez, and Yuval Tassa. Mujoco: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pp. 5026-5033. IEEE, 2012. doi: 10.1109/IROS.2012.6386109.
Arun Verma, Zhongxiang Dai, and Bryan Kian Hsiang Low. Bayesian optimization under stochastic delayed feedback. In International Conference on Machine Learning, pp. 22145-22167. PMLR, 2022.
Oriol Vinyals, Timo Ewalds, Sergey Bartunov, Petko Georgiev, Alexander Sasha Vezhnevets, Michelle Yeo, Alireza Makhzani, Heinrich Kuttler, John Agapiou, Julian Schrittwieser, et al. StarCraft II: A new challenge for reinforcement learning. arXiv preprint arXiv:1708.04782, 2017.
Oriol Vinyals, Igor Babuschkin, Wojciech M Czarnecki, Michael Mathieu, Andrew Dudzik, Junyoung Chung, David H Choi, Richard Powell, Timo Ewalds, Petko Georgiev, et al. Grandmaster level in StarCraft II using multi-agent reinforcement learning. Nature, 575(7782):350-354, 2019.
Nikos Vlassis, Michael L Littman, and David Barber. On the computational complexity of stochastic controller optimization in POMDPs. ACM Transactions on Computation Theory (TOCT), 4(4): 1-8, 2012.
Thomas J Walsh, Ali Nouri, Lihong Li, and Michael L Littman. Learning and planning in environments with delayed feedback. Autonomous Agents and Multi-Agent Systems, 18:83-105, 2009.
Yuanyu Wan, Wei-Wei Tu, and Lijun Zhang. Online strongly convex optimization with unknown delays. Machine Learning, 111(3):871-893, 2022.
# A ETHICAL STATEMENT AND BROADER IMPACT
This work addresses signal delay in DRL with a commitment to transparency, reproducibility, and responsible application of our findings. The potential for misuse of our methods in harmful applications is acknowledged, and we advocate for their use within ethical and legal guidelines.
Our work has potential for substantial impact, enhancing efficiency and safety of autonomous systems reliant on DRL. However, the possibility of misuse in ethically ambiguous applications exists. Our standardization of DOMDP could guide future DRL research, but diverse focus within the field is necessary to avoid research biases. We encourage the use of this work responsibly, considering its ethical implications.
# B RELATED WORK
Signal delay in decision-making Several previous studies have dealt with signal delay in MDPs (Bander & White, 1999; Katsikopoulos & Engelbrecht, 2003; Walsh et al., 2009); however, their results have not been extended to deep RL. Chen et al. (2021) is the only work we found that explicitly discusses DRLwD. There is also related work on traditional control and bandit problems with feedback delay. Recent works such as Blanchet et al. (2023) and Wan et al. (2022) have tailored upper-confidence-bound and Thompson sampling algorithms to handle unknown and stochastic delays in generalized linear contextual bandits. Li et al. (2019) explored the complexity of non-stochastic multi-armed bandit and bandit convex optimization settings with unknown delays. Verma et al. (2022) provided solutions for stochastic delayed feedback in Bayesian optimization, presenting novel algorithms with sub-linear regret guarantees for efficiently selecting new function queries amidst random delays. A concurrent study (Kim et al., 2023) proposes an actor-critic algorithm that shows a large performance gain in DRLwD by compressing the dimension of the augmented state. Our work considers architecture design for DRLwD, which is orthogonal to these studies.
POMDP methods Theoretical complexity analysis reveals that solving POMDPs is in general both statistically and computationally challenging (Papadimitriou & Tsitsiklis, 1987; Vlassis et al., 2012). A common solution for POMDPs is to model the function with additional context by employing RNNs, which distill the characteristics of prior observations and actions into the states of the RNN (Schmidhuber, 1990; Jaderberg et al., 2019; Vinyals et al., 2019). While a DOMDP is an instance of the broader class of POMDPs, we have shown that incorporating contextual actions into the observation turns a DOMDP into an MDP problem. Nonetheless, we examined the performance of a state-of-the-art RNN-based POMDP algorithm that performs well on many partially observable tasks (Ni et al., 2022), and found it to be unsuitable for DRLwD.
# C FUTURE WORK AND DISCUSSIONS
In this section, we focus on the potential integration of timestamp data into our model, a concept that presents promising avenues for future research but also comes with its own set of challenges.
# C.1 ENHANCING MODEL ROBUSTNESS WITH TIMESTAMP DATA
The integration of timestamp data into our model is primarily aimed at optimizing performance in environments with variable delays and managing asynchronous data from multiple sources. This approach is particularly relevant when the historical number $H$ aligns with the delay $\Delta T$ , as it can significantly improve the model's adaptability to changing delay patterns and enhance decision-making in systems with multiple, asynchronous data streams.
However, it is important to note that the effectiveness of this method may vary depending on the specific environment. For instance, certain sensors might not support timestamping, or network messages may lack time-related data. This means the technique is not a general solution for all cases.
These challenges are the primary reason why we have earmarked the integration of timestamp data as a subject for future work rather than including it in the main body of our current paper. Future research will need to address these environmental variabilities and develop adaptable models that can effectively leverage timestamp data where available.
# C.2 BROADENING THE APPLICATION BEYOND THE ACTOR-CRITIC FRAMEWORK
The issue of delayed signals can arise in a broad range of applications. Hence, it is crucial to develop methodologies that are sufficiently versatile to be integrated into various frameworks. In this project, we introduce a general approach that can be seamlessly incorporated into any actor-critic framework. However, some algorithms do not adhere to the actor-critic framework; expanding our methods to be entirely algorithm- and model-agnostic would significantly enhance their applicability and utility in a wider array of RL contexts.
# D THEOREM PROOFS
# D.1 THEOREM 2.1 DERIVATION
Proof. Let the action delay be denoted as $\Delta T^a$ and the observation delay as $\Delta T^s$ . We denote the actual action acting on the environment at time step $t$ as $a_t$ , and the action given by the agent at step $t$ as $\tilde{a}_t$ . With action delay $\Delta T^a$ , we have $a_t = \tilde{a}_{t - \Delta T^a}$ . The inference delay $\Delta T^I$ can be considered part of either the action or the observation delay. From the agent's view, the state transition probability under policy $\pi (a|\cdot)$ with delay can be written as:
$$
\begin{array}{l} \tilde {\mathcal {P}} (\tilde {s} _ {t + 1} | \tilde {s} _ {t}) \\ = \sum_ {\Delta T ^ {s} = 0} ^ {\Delta T _ {\max } ^ {s}} \mathcal {P} (\Delta T ^ {s}) \mathcal {P} (s _ {t + 1 - \Delta T ^ {s}} | s _ {t - \Delta T ^ {s}}) \\ = \sum_ {\Delta T ^ {s} = 0} ^ {\Delta T _ {\max } ^ {s}} \int_ {a _ {t - \Delta T ^ {s}}} \mathcal {P} (\Delta T ^ {s}) \mathcal {P} (s _ {t + 1 - \Delta T ^ {s}} | s _ {t - \Delta T ^ {s}}, a _ {t - \Delta T ^ {s}}) \mathcal {P} (a _ {t - \Delta T ^ {s}}) d a _ {t - \Delta T ^ {s}} \\ = \sum_ {\Delta T ^ {s} = 0} ^ {\Delta T _ {\max } ^ {s}} \sum_ {\Delta T ^ {a} = 0} ^ {\Delta T _ {\max } ^ {a}} \int_ {\tilde {a} _ {t - \Delta T ^ {s} - \Delta T ^ {a}}} \mathcal {P} (\Delta T ^ {s}, \Delta T ^ {a}) \mathcal {P} \left(s _ {t + 1 - \Delta T ^ {s}} \mid s _ {t - \Delta T ^ {s}}, \tilde {a} _ {t - \Delta T ^ {s} - \Delta T ^ {a}}\right) \\ \pi \left(\tilde {a} _ {t - \Delta T ^ {s} - \Delta T ^ {a}} \mid I _ {t - \Delta T ^ {s} - \Delta T ^ {a}}\right) d \tilde {a} _ {t - \Delta T ^ {s} - \Delta T ^ {a}}. \\ \end{array}
$$
Here, $\mathcal{P}(\Delta T^s)$ and $\mathcal{P}(\Delta T^a)$ denote the probabilities that the state and action signals are delayed by the given numbers of steps. $I_{t - \Delta T^{s} - \Delta T^{a}}$ denotes the information available to the agent for selecting action $\tilde{a}_{t - \Delta T^s -\Delta T^a}$ , i.e., the information available up to time step $t - \Delta T^s -\Delta T^a$ .
Since the state transition probability function $\mathcal{P}(s_{t + 1 - \Delta T^s}|s_{t - \Delta T^s},\tilde{a}_{t - \Delta T^s -\Delta T^a})$ is Markovian (time-independent), we have
$$
\begin{array}{l} \tilde {\mathcal {P}} (\tilde {s} _ {t + 1} | \tilde {s} _ {t}) \\ = \sum_ {\Delta T ^ {s} = 0} ^ {\Delta T _ {\max } ^ {s}} \sum_ {\Delta T ^ {a} = 0} ^ {\Delta T _ {\max } ^ {a}} \int_ {a} \mathcal {P} (\Delta T ^ {s}, \Delta T ^ {a}) \mathcal {P} (s ^ {\prime} | s, a) \pi (a | I _ {t - (\Delta T ^ {s} + \Delta T ^ {a})}) d a \\ = \sum_ {(\Delta T ^ {a} + \Delta T ^ {s}) = 0} ^ {\Delta T _ {\max } ^ {a} + \Delta T _ {\max } ^ {s}} \int_ {a} \mathcal {P} (\Delta T ^ {s} + \Delta T ^ {a}) \mathcal {P} (s ^ {\prime} | s, a) \pi (a | I _ {t - (\Delta T ^ {s} + \Delta T ^ {a})}) d a. \\ \end{array}
$$
From the outcome, we observe that the delay between decision-making and environmental response $\Delta T^a$ and the delay between environmental output and decision-making $\Delta T^s$ have the same effect on the state transition, and what matters is merely $\Delta T^s + \Delta T^a$ . Therefore, we can model the MDP-with-delay problem using only an observation delay $\Delta T = \Delta T^s + \Delta T^a$ , as in the DOMDP.

# D.2 THEOREM 4.1 DERIVATION
Proof. To show the Markovian property when considering the augmented state (where $\Delta T$ is the maximum delay)
$$
\bar {s} _ {t} = \left(\tilde {s} _ {t}, a _ {t - \Delta T: t - 1}\right),
$$
we need to show the transition probability function $\bar{\mathcal{P}} (\bar{s}_{t + 1}|\bar{s}_t,a_t)$ is independent of the time step $t$ (Markovian):
$$
\begin{array}{l} \bar {\mathcal {P}} (\bar {s} _ {t + 1} | \bar {s} _ {t}, a _ {t}) \\ = \tilde {\mathcal {P}} \left(\tilde {s} _ {t + 1}, a _ {t - \Delta T + 1: t} \mid \tilde {s} _ {t}, a _ {t - \Delta T: t}\right) \\ = \sum_ {\delta T ^ {\prime} = 0} ^ {\Delta T} \sum_ {\delta T = 0} ^ {\Delta T} \mathcal {P} (\delta T ^ {\prime}) \mathcal {P} (\delta T) \mathcal {P} \left(s _ {t + 1 - \delta T ^ {\prime}}, a _ {t - \Delta T + 1: t - 1} \mid s _ {t - \delta T}, a _ {t - \Delta T: t}\right) \\ = \sum_ {\delta T ^ {\prime} = 0} ^ {\Delta T} \sum_ {\delta T = 0} ^ {\Delta T} \mathcal {P} (\delta T ^ {\prime}) \mathcal {P} (\delta T) \mathcal {P} (s _ {t + 1 - \delta T ^ {\prime}} | s _ {t - \delta T}, a _ {t - \Delta T: t}), \\ \end{array}
$$
where $\mathcal{P}(\delta T)$ is the probability of a delay of $\delta T$ steps for $\tilde{s}_t$ , and $\mathcal{P}(\delta T')$ is the probability of a delay of $\delta T'$ steps for $\tilde{s}_{t+1}$ . Since $\mathcal{P}(\delta T')$ and $\mathcal{P}(\delta T)$ do not depend on $t$ by the definition of a DOMDP, we only need to prove that the last term $\mathcal{P}(s_{t+1-\delta T'}|s_{t-\delta T}, a_{t-\Delta T:t})$ is also independent of $t$ (with $0 \leq \delta T, \delta T' \leq \Delta T$ ). Let us discuss case by case:
When $\delta T' > \delta T + 1$ , we have $\mathcal{P}(s_{t+1-\delta T'}|s_{t-\delta T}, a_{t-\Delta T:t}) = 0$ because $t + 1 - \delta T' < t - \delta T$ and the previous state should not depend on the future one.
When $\delta T' = \delta T + 1$ , we have $\mathcal{P}(s_{t+1-\delta T'} | s_{t-\delta T}, a_{t-\Delta T:t}) = \mathcal{P}(s_{t-\delta T} | s_{t-\delta T}) = 1$ .
When $\delta T' = \delta T$ , we have $\mathcal{P}(s_{t+1-\delta T'}|s_{t-\delta T}, a_{t-\Delta T:t}) = \mathcal{P}(s_{t-\delta T+1}|s_{t-\delta T}, a_{t-\delta T})$ , which is Markovian since it is the state transition function of the original MDP.
When $\delta T' = \delta T - 1$ , we have
$$
\begin{array}{l} \mathcal {P} \left(s _ {t + 1 - \delta T ^ {\prime}} \mid s _ {t - \delta T}, a _ {t - \Delta T: t}\right) \\ = \mathcal {P} \left(s _ {t - \delta T + 2} \mid s _ {t - \delta T}, a _ {t - \delta T}, a _ {t - \delta T + 1}\right) \\ = \int_ {s _ {t - \delta T + 1}} P (s _ {t - \delta T + 2} | s _ {t - \delta T + 1}, a _ {t - \delta T + 1}) P (s _ {t - \delta T + 1} | s _ {t - \delta T}, a _ {t - \delta T}) d s _ {t - \delta T + 1}, \\ \end{array}
$$
where each of the terms is the state transition function of the original MDP, thus $\mathcal{P}(s_{t + 1 - \delta T'}|s_{t - \delta T},a_{t - \Delta T:t})$ is Markovian.
Similarly, when $\delta T' < \delta T - 1$ , $\mathcal{P}(s_{t + 1 - \delta T'}|s_{t - \delta T}, a_{t - \Delta T:t})$ can be written as accumulated integration of state transitions of the original MDP at the different steps, therefore remaining independent of $t$ .
So far, we have shown that $\mathcal{P}(s_{t+1-\delta T'}|s_{t-\delta T}, a_{t-\Delta T:t})$ is Markovian and thus $\bar{\mathcal{P}}(\bar{s}_{t+1}|\bar{s}_t, a_t)$ is also Markovian.
Note that a similar theorem was also derived in (Chen et al., 2021). However, they considered a fixed delay step, while we consider the more general case of unfixed delay.
# E IMPLEMENTATION DETAILS
# E.1 ENVIRONMENT SETUP
(1) Fixed Delay: This category includes four fundamental MuJoCo environments: Walker2D, Ant, HalfCheetah, and Hopper. These environments are generally well-handled by existing methods, avoiding issues such as local optima (e.g., in Reacher) or insurmountable challenges (e.g., in HumanoidStandup). Thus, they provide suitable baselines for evaluating the impact of signal delay. We modify these environments to return the state from $\Delta T$ steps in the past, thereby simulating a fixed delay. For implementation, we use a FIFO queue of length $\Delta T$ : at each step we push the new state and always return the state at the end of the queue to the agent.
(2) Unfixed Delay: This setting reflects real-world scenarios with random delays (such as network latency). We implement it using a FIFO queue of length $\Delta T$ that is updated with new states; at each step we sample from the queue to produce the agent's observation. Straightforward independent sampling is not used, because real-world observations preserve their order. Instead, we use a sampling strategy in which we sample a shift from $\{0,1,2\}$ relative to the previous step's queue position, as sketched below. This maintains ordering and causes delays to range from 0 to $\Delta T$ , with an average delay proportional to $\Delta T$ .
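A minimal sketch of this order-preserving sampling, under our reading of the description (the variable names and the clipping behavior at the queue boundaries are assumptions):

```python
import random

def next_delay(prev_delay, max_delay):
    """Advance time by one step, then shift the queue position by a random
    amount in {0, 1, 2}. The delivered observation is never older than the
    previous one, and the delay stays within [0, max_delay]."""
    shift = random.choice([0, 1, 2])
    return min(max(prev_delay + 1 - shift, 0), max_delay)
```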
(3) Large State Space: The dimensionality of the state space can significantly influence the feasibility of recovering the oracle state from delays. To study this, we utilized the Humanoid environment in Mujoco, which has a state dimension of 376, far larger than that of Ant (27), HalfCheetah (17), Hopper (11), or Walker (17).
(4) Probabilistic: To approximate the inherent uncertainties of real-world control systems, we applied a probabilistic adaptation to the deterministic Mujoco environments (Todorov et al., 2012). We employed two methods to introduce randomness: (1) Noise action, where Gaussian noise, $\mathcal{N}(0,0.3)$ , is added to the input action implemented by the environment; and (2) Sticky action, where there's a $30\%$ chance that the environment will execute the previous action instead of the current input.
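For illustration, a minimal action-wrapper sketch of the two perturbations is shown below; the paper applies them separately, and we combine them into one class with a mode flag only for brevity (the class and parameter names are ours).

```python
import numpy as np
import gym

class ProbabilisticActionWrapper(gym.ActionWrapper):
    """Sketch of the noisy-action / sticky-action adaptations (our names)."""

    def __init__(self, env, noise_std=0.3, sticky_prob=0.3, mode="noisy"):
        super().__init__(env)
        self.noise_std, self.sticky_prob, self.mode = noise_std, sticky_prob, mode
        self.prev_action = np.zeros(env.action_space.shape)

    def action(self, action):
        if self.mode == "sticky" and np.random.rand() < self.sticky_prob:
            action = self.prev_action  # repeat the previous action
        elif self.mode == "noisy":
            # add Gaussian noise N(0, 0.3) to the executed action
            action = action + np.random.normal(0.0, self.noise_std,
                                               size=np.shape(action))
        self.prev_action = np.asarray(action)
        return np.clip(action, self.action_space.low, self.action_space.high)
```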
# E.2 IMPLEMENTATION OF BASELINES FROM OTHER RESEARCH
This study specifically examines the problem of delayed signals, which is a characteristic of the environment. Consequently, we modify only the environmental aspects while maintaining the integrity of the baseline algorithms. To ensure accuracy, we strictly use the original authors' source code for the baseline implementations, altering only the environmental parameters to prevent implementation errors. Additionally, since all the baselines (Han et al., 2020; Ni et al., 2022; Chen et al., 2021) have previously been tested in the MuJoCo environment as reported in their papers, it is reasonable for us to employ the default parameters established in those studies. This consistency in environmental settings enables a more direct and valid comparison of our findings with existing research.
# E.3 NETWORK ARCHITECTURE
In this study, we maintain a consistent architecture design across all investigated variants, with a focus on training paradigms and frameworks rather than network specifics. For both actor and critic networks in standard algorithms like SAC, DDPG, and TD3, we use a two-layer MLP with 256 features per layer, employing tanh as the activation function. In the case of RNN networks, we employ two layers of recurrent cells, also with 256 hidden features, to align the number of parameters. For the transformer structure, we use a single-head attention mechanism where key and value share the same tensor, and the query has a separate tensor, each with a feature size of 256. This attention block is applied twice, mirroring the two-layer MLP approach. For prediction-based methods, including those involving encoding, the process involves first encoding inputs into a 256-feature space using a two-layer MLP, followed by another two-layer MLP for making predictions. This uniform architecture across various models allows for a focused analysis on training methodologies, ensuring that observed performance differences are attributable to the training paradigm rather than structural variations.
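A sketch of this shared two-layer backbone in PyTorch follows; the helper name is ours, and the actual codebase may organize it differently.

```python
import torch.nn as nn

def two_layer_mlp(in_dim, out_dim, hidden=256):
    """Two hidden layers of 256 features with tanh activations, as described
    above; usable for both actor and critic heads in this sketch."""
    return nn.Sequential(
        nn.Linear(in_dim, hidden), nn.Tanh(),
        nn.Linear(hidden, hidden), nn.Tanh(),
        nn.Linear(hidden, out_dim),
    )
```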
# E.4 HYPERPARAMETERS
For a fair and consistent evaluation, we adhere to default hyperparameters as outlined in foundational studies for each algorithm. In the case of SAC, we follow parameters from Haarnoja et al. (2018b). Our approach for technique-specific parameters begins with standard settings, followed by adjustments within a practical range. This includes tuning the prediction loss weight in methods involving prediction and encoding, testing values in the set $\{0.005, 0.01, 0.05, 0.1\}$ . Additionally, we explore auto KL weight tuning, setting target KL loss ranges, and sweeping through values $\{5, 20, 50, 200\}$ . For input format consistency, especially in RNN-based models, we use 32 sequences of length 64. The memory buffer size is matched to the number of environment steps.
In scenarios involving multiple parameters, we average performance across each {algorithm, environment, parameter} combination, subsequently identifying parameters that yield the best average performance for each {algorithm, environment} pair. Then, we calculate the average performance of all the environments. This method ensures that performance is not compromised by suboptimal hyperparameter selection.
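In sketch form, the selection protocol can be written as follows; the `results` format is an assumption made purely for illustration.

```python
import numpy as np
from collections import defaultdict

def select_and_report(results):
    """results: dict mapping (algorithm, environment, param) -> list of run
    scores. Pick the param with the best mean score per (algorithm, env),
    then average those best scores over environments. Illustrative sketch."""
    best = defaultdict(lambda: -np.inf)
    for (algo, env, param), scores in results.items():
        best[(algo, env)] = max(best[(algo, env)], float(np.mean(scores)))
    per_algo = defaultdict(list)
    for (algo, _env), score in best.items():
        per_algo[algo].append(score)
    return {algo: float(np.mean(s)) for algo, s in per_algo.items()}
```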
# E.5 DELAYED ENVIRONMENT IMPLEMENTATION
We developed an accessible plug-in environment wrapper for delayed environments, utilizing gym.Wrapper from the OpenAI Gym library (Brockman et al., 2016). This wrapper integrates seamlessly with pre-existing environments. In the fixed delay setting, the environment consistently returns observations from $\Delta T$ time steps prior. For the more complex and practical unfixed delay setting, the delay can range from 0 to $\Delta T$ time steps. To make it more realistic, we imposed a strict condition that observations are delivered in order, i.e., a newly delivered observation is never older than previously delivered ones. Furthermore, our implementation accommodates action delay, in accordance with the equivalence established in Sec. 2.2.
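The fixed-delay case can be sketched as a small gym.Wrapper like the one below. This is our illustration of the described behavior (using the classic 4-tuple Gym step API), not the released wrapper itself.

```python
from collections import deque
import gym

class FixedDelayWrapper(gym.Wrapper):
    """Return the observation from `delay` steps in the past (a sketch)."""

    def __init__(self, env, delay):
        super().__init__(env)
        self.delay = delay
        self.queue = deque(maxlen=delay + 1)

    def reset(self, **kwargs):
        obs = self.env.reset(**kwargs)
        # Pre-fill so the first `delay` steps return the initial state.
        self.queue = deque([obs] * (self.delay + 1), maxlen=self.delay + 1)
        return self.queue[0]

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.queue.append(obs)  # newest enters; the oldest is what the agent sees
        return self.queue[0], reward, done, info
```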
# E.6 CODE IMPLEMENTATION
We designed a modular and configurable implementation of all methods in this paper, with seamless compatibility with popular state-of-the-art off-policy DRL algorithms, including DDPG, TD3, and SAC. Through it, all the designs mentioned in this paper can be easily loaded or unloaded for efficient application and further study. For the hyperparameters of each algorithm, we align them with the values in the original paper in which the algorithm was proposed.
# E.7 ADDITIONAL DETAILS
Our performance analysis is derived from eight independent runs, ensuring statistical reliability. For the "Fixed/Basic" and "Unfixed" environments, evaluations were conducted after 2 million environmental steps, while assessments in other environments were completed after 1 million steps. This approach is based on the understanding that varying environments require different levels of interaction for effective learning.
# F SUPPLEMENTARY EXPERIMENTAL RESULTS
# F.1 IS REPRESENTATION LEARNING A BOTTLENECK?
In this section, we design an experiment to ascertain whether the failure of certain baseline models can be attributed to deficiencies in representation learning. Specifically, we examine the Variational Recurrent Model (VRM) proposed by Han et al. (2020) as a case study. VRM is a method for solving POMDP problems using a variational recurrent neural network (Chung et al., 2015), which predicts the next observation from contextual observations and actions. Table 1 shows that VRM suffers dramatically from observation delay. To investigate whether this failure is due to unsuccessful representation learning, we present the representation learning loss (negative evidence lower bound) of VRM's variational recurrent neural network on each task (Table 2). The results reflect no significant difference in the representation learning loss for delay steps 0, 1, 2, and 4. However, VRM's performance (Table 1) with delay=4 is only around one-third of that with delay=0. Thus, it appears implausible that the difficulties VRM encounters in handling delayed tasks are caused by ineffective representation learning. This suggests that other factors may be contributing to the observed performance degradation.
<table><tr><td>Delay</td><td>Walker2d</td><td>HalfCheetah</td><td>Hopper</td><td>Ant</td><td>Average</td></tr><tr><td>0</td><td>-1.87 ± 0.31</td><td>-1.60 ± 0.15</td><td>-2.50 ± 0.07</td><td>-1.68 ± 0.21</td><td>-1.91</td></tr><tr><td>1</td><td>-1.90 ± 0.20</td><td>-1.74 ± 0.10</td><td>-2.54 ± 0.05</td><td>-1.99 ± 0.43</td><td>-2.04</td></tr><tr><td>2</td><td>-2.25 ± 0.24</td><td>-1.70 ± 0.20</td><td>-2.62 ± 0.10</td><td>-1.89 ± 0.21</td><td>-2.10</td></tr><tr><td>4</td><td>-2.26 ± 0.06</td><td>-1.65 ± 0.22</td><td>-2.49 ± 0.09</td><td>-1.81 ± 0.24</td><td>-2.05</td></tr><tr><td>8</td><td>-1.92 ± 0.18</td><td>-1.57 ± 0.08</td><td>-1.81 ± 0.08</td><td>-1.67 ± 0.20</td><td>-1.74</td></tr><tr><td>12</td><td>-1.86 ± 0.14</td><td>-1.29 ± 0.20</td><td>-1.56 ± 0.14</td><td>-1.31 ± 0.10</td><td>-1.50</td></tr></table>
Table 2: Representation learning loss (lower is better) of VRM at the end of learning for various environments with different delay steps (fixed delay). Data are mean ± standard deviation.
# F.2 DOES REDUNDANT HISTORICAL INFORMATION HAMPER PERFORMANCE?
In addressing concerns about the potential redundancy of historical information in our model, we conducted experiments to determine the optimal quantity of historical data for enhancing performance. Specifically, we varied the number of past actions $(H)$ considered by the model and assessed its impact under fixed delay conditions $(\Delta t = \{0,1,4\})$ .
Our findings, detailed in Tab. 3, reveal that excluding past actions $(H = 0)$ results in suboptimal performance. For instance, with $\Delta t = 4$ , the absence of historical actions led to a low performance score of 47.6. Conversely, incorporating a moderate history $(H)$ significantly improves performance, as shown by a score of 84.3 for $H = 2$ . However, excessively large histories $(H \geq 8)$ negatively impact performance, evident from a decrease to 55.0 for $H = 12$ . This trend is consistent across different $\Delta t$ values, indicating that a balanced approach to historical data is crucial.
Our experiments employed both "Delay Reconciled Training for Critic" and "State Augmentation - MLP" methods, maintaining consistency across all variables except $H$ . The results also shed light on the limited success of RNN structures in our experiments. RNNs inherently process extensive historical information, leading to redundancy, particularly for state and action histories beyond the $\Delta t$ window. This aligns with our theoretical finding (Theorem 4.1) that maintaining the Markovian property requires only historical actions within $\Delta t$ .
Table 3: Impact of Historical Information $(H)$ on Model Performance under Different Delays $(\Delta t)$ . The under-performing methods are marked with $\downarrow$ .
<table><tr><td rowspan="2">Historical Timesteps H ↓</td><td colspan="3"># Delay Δt</td></tr><tr><td>0</td><td>1</td><td>4</td></tr><tr><td>H = 0</td><td>97.7±24.0</td><td>87.8±22.3</td><td>47.6±20.3↓</td></tr><tr><td>H = 1</td><td>104.5±18.4</td><td>100.2±16.5</td><td>77.7±15.4</td></tr><tr><td>H = 2</td><td>98.8±23.4</td><td>93.7±18.9</td><td>84.3±16.3</td></tr><tr><td>H = 4</td><td>86.8±18.6</td><td>81.6±23.4</td><td>80.3±17.8</td></tr><tr><td>H = 8</td><td>68.3±12.5↓</td><td>73.0±13.6↓</td><td>58.6±15.5</td></tr><tr><td>H = 12</td><td>62.6±14.3↓</td><td>60.9±12.8↓</td><td>55.0±8.3↓</td></tr></table>
# F.3 COMPARING FIXED AND UNFIXED DELAY
Here, we report additional results of algorithms on fixed and unfixed delay in Tab. 4. As most algorithm performances are already reported in Tab. 1, we focus primarily on comparing the variants of the Prediction and Encoding methods. Our findings reveal that the detached methods consistently outperform the non-detached methods. Specifically, for prediction-based methods, the average performance increases from $68.0\%$ to $83.6\%$ under fixed delay, and from $58.8\%$ to $72.5\%$ under unfixed delay. For encoding-based methods, we observe an average performance increase from $76.1\%$ to $84.5\%$ under fixed delay and from $70.2\%$ to $77.9\%$ under unfixed delay. These results suggest that the detached design is preferable.
# F.4 LARGE OBSERVATION SPACE
The performance of various algorithms in environments with large observation spaces is presented in Tab. 5. Consistently, both Delay-Reconciled Training and State Augmentation - MLP yield positive performance enhancements, and State Augmentation - MLP consistently offers the best performance. Conversely, the performances of Prediction† and Encoding† are substantially inferior, implying that explicitly introducing prediction supervision may adversely impact performance when the observation space is large.
# F.5 PROBABILISTIC SETTINGS - GAUSSIAN MUJOCO
In this subsection, we explore the impact of probabilistic state transitions by introducing diagonal Gaussian noise $\mathcal{N}(0,\sigma^2)$ to the Mujoco environment. This approach tests our algorithm's robustness under varying degrees of uncertainty.
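One simple way to realize this, sketched under our reading of the setup (noise is added to the state returned to the agent at every step; the class name and default $\sigma$ are ours):

```python
import numpy as np
import gym

class GaussianNoiseWrapper(gym.ObservationWrapper):
    """Add diagonal Gaussian noise N(0, sigma^2) to each returned state."""

    def __init__(self, env, sigma=0.1):
        super().__init__(env)
        self.sigma = sigma

    def observation(self, obs):
        return obs + np.random.normal(0.0, self.sigma, size=obs.shape)
```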
Table 4: Performance (%) of algorithms in fixed and unfixed delay environments (additional comparison of detach and non-detach). The best performing methods of prediction and encoding are highlighted in bold.
<table><tr><td rowspan="2">Method</td><td colspan="13"># Delayed Time Steps</td></tr><tr><td>0</td><td colspan="2">1</td><td colspan="2">2</td><td colspan="2">4</td><td colspan="2">8</td><td colspan="2">12</td><td colspan="2">Avg. #≥4</td></tr><tr><td colspan="14">Prediction</td></tr><tr><td>Prediction</td><td>103.1 ±7.8</td><td>77.7 ±11.3</td><td>75.2 ±9.5</td><td>77.1 ±11.5</td><td>80.2 ±10.2</td><td>73.2 ±13.2</td><td>70.4 ±11.6</td><td>70.1 ±8.6</td><td>57.3 ±10.1</td><td>59.6 ±8.3</td><td>47.1 ±11.4</td><td>68.0 ±10.0</td><td>58.8 ±11.1</td></tr><tr><td>Prediction†</td><td>102.1 ±9.7</td><td>101.5 ±13.9</td><td>96.1 ±12.2</td><td>100.5 ±14.4</td><td>99.0 ±12.5</td><td>92.2 ±16.2</td><td>88.6 ±14.4</td><td>85.6 ±10.8</td><td>71.0 ±12.8</td><td>73.0 ±10.3</td><td>58.1 ±14.1</td><td>83.6 ±12.5</td><td>72.5 ±13.8</td></tr><tr><td colspan="14">Encoding</td></tr><tr><td>Encoding</td><td>98.5 ±10.9</td><td>91.7 ±8.1</td><td>102.5 ±8.7</td><td>93.1 ±9.0</td><td>83.4 ±8.5</td><td>81.8 ±8.5</td><td>77.9 ±7.4</td><td>80.1 ±11.2</td><td>70.2 ±9.7</td><td>66.0 ±8.9</td><td>62.5 ±8.7</td><td>76.1 ±9.5</td><td>70.2 ±8.7</td></tr><tr><td>Encoding†</td><td>97.6 ±12.1</td><td>101.4 ±9.0</td><td>113.0 ±9.6</td><td>103.2 ±10.0</td><td>92.0 ±9.5</td><td>90.4 ±9.5</td><td>86.6 ±8.2</td><td>89.9 ±12.4</td><td>77.9 ±10.8</td><td>73.3 ±9.9</td><td>69.2 ±9.7</td><td>84.5 ±10.6</td><td>77.9 ±9.6</td></tr></table>
Table 5: Performance (%) of algorithms in environment with large observation space. The top-performing methods are highlighted in bold.
<table><tr><td rowspan="2">Method</td><td colspan="6"># Delayed Time Steps</td></tr><tr><td>0</td><td>1</td><td>2</td><td>4</td><td>8</td><td>12</td></tr><tr><td>Vanilla SAC</td><td>24.7±13.6</td><td>11.8±1.9</td><td>12.3±2.8</td><td>2.5±1.2</td><td>2.6±1.7</td><td>2.9±1.8</td></tr><tr><td>Delay-Reconciled Training</td><td>23.9±6.4</td><td>26.6±7.2</td><td>21.8±8.1</td><td>17.3±4.7</td><td>13.2±2.5</td><td>14.6±3.1</td></tr><tr><td>State Augmentation - MLP</td><td>83.8±22.9</td><td>103.4±11.5</td><td>94.6±11.9</td><td>102.7±6.4</td><td>101.3±7.8</td><td>89.7±14.5</td></tr><tr><td>State Augmentation - RNN</td><td>48.3±30.8</td><td>67.7±39.4</td><td>47.8±38.6</td><td>47.5±43.7</td><td>53.6±45.8</td><td>55.4±42.6</td></tr><tr><td>Encoding†</td><td>87.5±24.8</td><td>97.3±15.7</td><td>67.9±21.8</td><td>20.4±4.7</td><td>18.2±3.0</td><td>14.5±2.9</td></tr><tr><td>Prediction†</td><td>87.7±32.6</td><td>22.3±6.2</td><td>16.6±3.7</td><td>18.5±3.9</td><td>16.9±2.4</td><td>14.4±2.9</td></tr></table>
Experiments were conducted with noise scales $\sigma = \{0.05, 0.1, 0.2, 0.4\}$ , comparing our modified algorithm (Ours*) against Vanilla SAC. Notably, at delay=0, the performances of Vanilla SAC and Ours* are equivalent.
The performance metrics are presented as "Value (Relative%)". The "Value" is consistent with the normalization in Tab. 1, and the "Relative%" indicates the reward relative to the zero-delay scenario. Key observations include:
- Vanilla SAC shows a marked performance decrease with delay, dropping to $23.78\%$ , $26.67\%$ , $32.39\%$ , and $44.25\%$ for $\sigma$ values of 0.05, 0.1, 0.2, and 0.4, respectively.
- Our method significantly enhances performance in delayed scenarios, with improvements from $23.78\%$ to $79.57\%$ , $26.67\%$ to $83.14\%$ , $32.39\%$ to $92.25\%$ , and $44.25\%$ to $98.23\%$ for the respective $\sigma$ values.
These results demonstrate the effectiveness of our algorithm in environments with probabilistic state transitions, underscoring its robustness and adaptability.
# F.6 PROBABILISTIC SETTINGS - NOISY AND STICKY ACTIONS
This section presents additional numerical results for algorithms within probabilistic state transition environments. Performance measures for the two probabilistic environment implementations, "noisy action" and "sticky action", are outlined in Tab. 7 and Tab. 8, respectively.
Notably, both the "noisy action" and "sticky action" settings increase the environment's complexity, reducing zero-delay performance from $100.0\%$ to $72.3\%$ and $64.3\%$, respectively. In both environments, Delay-Reconciled Training and State Augmentation - MLP consistently outperform vanilla SAC in every setting with nonzero delay, suggesting that our designs remain effective in probabilistic environments as well. However, prediction- and encoding-based methods fail to perform satisfactorily in these settings.
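As a concrete reading of the two settings, the sketch below shows how such action perturbations are typically implemented; the probability and noise scale shown are illustrative assumptions, not the paper's exact values.

```python
# Illustrative sketch of the two probabilistic action settings evaluated here;
# p and sigma are placeholder values, not the paper's configuration.
import numpy as np
import gymnasium as gym


class StickyAction(gym.ActionWrapper):
    """With probability p, repeat the previously executed action."""
    def __init__(self, env, p=0.25):
        super().__init__(env)
        self.p = p
        self.prev_action = None  # sketch: a full version would reset per episode

    def action(self, action):
        if self.prev_action is not None and np.random.rand() < self.p:
            action = self.prev_action
        self.prev_action = action
        return action


class NoisyAction(gym.ActionWrapper):
    """Perturb each action with zero-mean Gaussian noise before execution."""
    def __init__(self, env, sigma=0.1):
        super().__init__(env)
        self.sigma = sigma

    def action(self, action):
        noisy = action + np.random.normal(0.0, self.sigma, size=np.shape(action))
        return np.clip(noisy, self.action_space.low, self.action_space.high)
```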
Table 6: Performance (%) of algorithms in the probabilistic (Gaussian state) environment. The top-performing methods are highlighted in bold.
<table><tr><td rowspan="2">Scale of Randomness</td><td rowspan="2">Method</td><td colspan="6"># Delayed Time Steps</td></tr><tr><td>0</td><td>1</td><td>2</td><td>4</td><td>8</td><td>12</td></tr><tr><td>σ = 0.05</td><td>Ours*</td><td>32.8 ± 7.4 (100%)</td><td>35.0 ± 7.9 (106.71%)</td><td>32.9 ± 5.7 (100.30%)</td><td>31.7 ± 5.8 (96.65%)</td><td>27.1 ± 7.2 (82.62%)</td><td>26.1 ± 6.0 (79.57%)</td></tr><tr><td>σ = 0.05</td><td>Vanilla SAC</td><td>32.8 ± 7.4 (100%)</td><td>12.4 ± 3.9 (37.80%)</td><td>11.0 ± 1.9 (33.54%)</td><td>9.7 ± 1.5 (29.57%)</td><td>8.1 ± 1.1 (24.70%)</td><td>7.8 ± 1.3 (23.78%)</td></tr><tr><td>σ = 0.1</td><td>Ours*</td><td>25.5 ± 2.9 (100%)</td><td>23.1 ± 4.0 (90.59%)</td><td>24.3 ± 5.3 (95.29%)</td><td>21.2 ± 4.7 (83.14%)</td><td>21.9 ± 6.8 (85.88%)</td><td>21.2 ± 4.2 (83.14%)</td></tr><tr><td>σ = 0.1</td><td>Vanilla SAC</td><td>25.5 ± 2.9 (100%)</td><td>11.0 ± 1.4 (43.14%)</td><td>10.4 ± 1.7 (40.78%)</td><td>9.4 ± 1.3 (36.86%)</td><td>7.4 ± 1.2 (29.02%)</td><td>6.8 ± 3.3 (26.67%)</td></tr><tr><td>σ = 0.2</td><td>Ours*</td><td>14.2 ± 1.2 (100%)</td><td>13.3 ± 1.9 (93.66%)</td><td>13.3 ± 1.8 (93.66%)</td><td>13.4 ± 2.1 (94.37%)</td><td>11.8 ± 2.3 (83.10%)</td><td>13.1 ± 2.5 (92.25%)</td></tr><tr><td>σ = 0.2</td><td>Vanilla SAC</td><td>14.2 ± 1.2 (100%)</td><td>10.9 ± 1.9 (76.76%)</td><td>10.1 ± 1.1 (71.13%)</td><td>8.6 ± 1.5 (60.56%)</td><td>8.0 ± 1.2 (56.34%)</td><td>4.6 ± 5.0 (32.39%)</td></tr><tr><td>σ = 0.4</td><td>Ours*</td><td>11.3 ± 1.7 (100%)</td><td>11.0 ± 1.4 (97.35%)</td><td>10.0 ± 2.3 (88.50%)</td><td>10.8 ± 2.1 (95.58%)</td><td>10.7 ± 1.7 (94.69%)</td><td>11.1 ± 1.1 (98.23%)</td></tr><tr><td>σ = 0.4</td><td>Vanilla SAC</td><td>11.3 ± 1.7 (100%)</td><td>10.0 ± 1.2 (88.50%)</td><td>10.1 ± 1.2 (89.38%)</td><td>8.8 ± 2.1 (77.88%)</td><td>8.0 ± 1.8 (70.80%)</td><td>5.0 ± 5.4 (44.25%)</td></tr></table>
Most notably, the prediction method's performance in the noisy action environment drops drastically from $72.8\%$ to $9.9\%$ with just a one-step delay. More importantly, the prediction and encoding methods consistently underperform State Augmentation - MLP, implying that they do not invariably yield performance gains in environments with probabilistic state transitions.
Table 7: Performance (%) of algorithms in the probabilistic (noisy action) environment. The top-performing methods are highlighted in bold.
<table><tr><td rowspan="2">Method</td><td colspan="6"># Delayed Time Steps</td></tr><tr><td>0</td><td>1</td><td>2</td><td>4</td><td>8</td><td>12</td></tr><tr><td>Vanilla SAC</td><td>72.3±12.4</td><td>48.1±14.7</td><td>38.6±12.9</td><td>14.5±9.4</td><td>3.2±3.5</td><td>0.7±3.8</td></tr><tr><td>Delay-Reconciled Training</td><td>74.7±9.1</td><td>66.6±9.2</td><td>59.8±8.6</td><td>48.3±8.3</td><td>25.4±6.9</td><td>14.2±4.7</td></tr><tr><td>State Augmentation - MLP</td><td>77.9±9.2</td><td>71.3±11.5</td><td>65.7±10.3</td><td>64.6±11.9</td><td>48.8±10.2</td><td>34.1±14.7</td></tr><tr><td>State Augmentation - RNN</td><td>65.4±15.6</td><td>60.7±11.4</td><td>54.1±10.5</td><td>40.9±10.3</td><td>20.3±8.7</td><td>12.5±5.2</td></tr><tr><td>Prediction†</td><td>72.8±18.3</td><td>9.9±4.8</td><td>20.4±10.2</td><td>24.7±8.1</td><td>17.6±3.2</td><td>5.3±1.1</td></tr><tr><td>Encoding†</td><td>35.7±19.8</td><td>41.2±19.4</td><td>43.8±12.7</td><td>39.5±11.6</td><td>25.9±5.7</td><td>25.4±1.9</td></tr></table>
Table 8: Performance (%) of algorithms in the probabilistic (sticky action) environment. The top-performing methods are highlighted in bold.
<table><tr><td rowspan="2">Method</td><td colspan="6"># Delayed Time Steps</td></tr><tr><td>0</td><td>1</td><td>2</td><td>4</td><td>8</td><td>12</td></tr><tr><td>Vanilla SAC</td><td>64.3±18.7</td><td>44.6±16.1</td><td>26.5±15.8</td><td>9.4±10.3</td><td>6.7±3.4</td><td>0.8±3.9</td></tr><tr><td>Delay-Reconciled Training</td><td>63.9±15.6</td><td>70.8±18.2</td><td>66.7±12.9</td><td>52.4±11.5</td><td>29.3±9.2</td><td>18.2±9.6</td></tr><tr><td>State Augmentation - MLP</td><td>68.7±11.2</td><td>62.4±13.5</td><td>62.6±18.3</td><td>57.8±21.9</td><td>38.9±13.6</td><td>32.3±11.5</td></tr><tr><td>State Augmentation - RNN</td><td>57.5±19.8</td><td>57.1±13.4</td><td>51.9±14.8</td><td>39.3±18.6</td><td>16.6±8.7</td><td>15.7±8.4</td></tr><tr><td>Prediction†</td><td>52.9±14.6</td><td>27.3±3.9</td><td>27.8±6.1</td><td>21.4±7.2</td><td>13.5±4.3</td><td>12.6±4.9</td></tr><tr><td>Encoding†</td><td>42.7±4.8</td><td>37.2±2.7</td><td>37.4±2.4</td><td>34.8±12.6</td><td>29.3±7.2</td><td>26.9±4.8</td></tr></table>
# F.7 COMPREHENSIVE ANALYSIS OF LEARNING CURVES
This section provides an in-depth analysis of the learning curves exhibited by various algorithms across distinct environments. Figures 8 through 15 illustrate the progression of learning in environments with consistent, fixed delays, while Figures 16 through 23 present the learning trajectories in environments where delays are variable and unfixed. The results are intended to serve as a comprehensive benchmark for subsequent research in this domain. Additionally, we highlight several key insights derived from our analysis.
1. Even a 2-step delay can significantly impair the learning efficiency of standard algorithms, leading to nearly flat learning curves.
2. The impact of delay varies dramatically across different environments. For example, the Hopper environment is relatively less affected by delay, experiencing an overall performance drop of around $10\%$ as illustrated in Fig. 13 and 21. On the other hand, the HalfCheetah environment sees a substantial performance decline of approximately $60\%$ with a 12-step delay.
3. The presence of delay affects both the speed of learning and the final performance. As the number of delay steps increases, the learning curve still shows a steady rise in most cases, indicating gradual adaptation of the learning process to the delay.
Each of Figures 8 through 23 shows four panels: (a) Ant, (b) HalfCheetah, (c) Hopper, (d) Walker2d.

Figure 8: Learning curve of algorithms on environment with fixed delay: vanilla DDPG.

Figure 9: Learning curve of algorithms on environment with fixed delay: vanilla TD3.

Figure 10: Learning curve of algorithms on environment with fixed delay: vanilla SAC.

Figure 11: Learning curve of algorithms on environment with fixed delay: RNN Strong.

Figure 12: Learning curve of algorithms on environment with fixed delay: delay-reconciled critic training.

Figure 13: Learning curve of algorithms on environment with fixed delay: state augmentation - MLP.

Figure 14: Learning curve of algorithms on environment with fixed delay: state augmentation - RNN.

Figure 15: Learning curve of algorithms on environment with fixed delay: symmetric - MLP.

Figure 16: Learning curve of algorithms on environment with unfixed delay: vanilla DDPG.

Figure 17: Learning curve of algorithms on environment with unfixed delay: vanilla TD3.

Figure 18: Learning curve of algorithms on environment with unfixed delay: vanilla SAC.

Figure 19: Learning curve of algorithms on environment with unfixed delay: RNN Strong.

Figure 20: Learning curve of algorithms on environment with unfixed delay: delay-reconciled critic training.

Figure 21: Learning curve of algorithms on environment with unfixed delay: state augmentation - MLP.

Figure 22: Learning curve of algorithms on environment with unfixed delay: state augmentation - RNN.

Figure 23: Learning curve of algorithms on environment with unfixed delay: symmetric - MLP.
addressingsignaldelayindeepreinforcementlearning/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:df5a102a579e2acb52433cdf603941eada29edeaa75f82fe75cf5f1f1bc77bcf
size 1635548
addressingsignaldelayindeepreinforcementlearning/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d0547bcc268a8e153225fd16a389e00e1760acf51eb40829eb463d42c830af3
size 945850
adversarialautomixup/51bc7332-976c-4305-8ab7-b6e024fbaeec_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d971dcb2098ebd251d5376c963efae585c790a8bb21c9e6c38ad288a036caecb
size 115978
adversarialautomixup/51bc7332-976c-4305-8ab7-b6e024fbaeec_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c723534a20146034c79e19948c8c070d0a519c35fa2b3d3f0a26cb59abbe9f0
size 140267
adversarialautomixup/51bc7332-976c-4305-8ab7-b6e024fbaeec_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d375079908b62d4f0af432d0d9073a126bd66ab9b03e488c61878b160b0b7143
size 1513394
adversarialautomixup/full.md
ADDED
@@ -0,0 +1,502 @@
# ADVERSARIAL AUTOMIXUP
Huafeng Qin $^{1,\ast,\dagger}$ Xin Jin $^{1,\ast}$ Yun Jiang $^{1}$ Mounim A. El-Yacoubi $^{2}$ Xinbo Gao $^{3}$

<sup>1</sup>Chongqing Technology and Business University
<sup>2</sup>Telecom SudParis, Institut Polytechnique de Paris
<sup>3</sup>Chongqing University of Posts and Telecommunications
* Equal contribution † Corresponding author

# ABSTRACT
Data mixing augmentation has been widely applied to improve the generalization ability of deep neural networks. Recently, offline data mixing augmentation, e.g. handcrafted and saliency information-based mixup, has been gradually replaced by automatic mixing approaches. Through minimizing two sub-tasks, namely, mixed sample generation and mixup classification, in an end-to-end way, AutoMix significantly improves accuracy on image classification tasks. However, as the optimization objective is consistent for the two sub-tasks, this approach is prone to generating consistent rather than diverse mixed samples, which results in overfitting during target task training. In this paper, we propose AdAutoMixup, an adversarial automatic mixup augmentation approach that generates challenging samples to train a robust classifier for image classification, by alternately optimizing the classifier and the mixup sample generator. AdAutoMixup comprises two modules, a mixed example generator and a target classifier. The mixed sample generator aims to produce hard mixed examples to challenge the target classifier, while the target classifier aims to learn robust features from hard mixed examples to improve generalization. To prevent the collapse of the inherent meanings of images, we further introduce an exponential moving average (EMA) teacher and cosine similarity to train AdAutoMixup in an end-to-end way. Extensive experiments on seven image benchmarks consistently prove that our approach outperforms the state of the art in various classification scenarios. The source code is available at https://github.com/JinXins/Adversarial-AutoMixup.
# 1 INTRODUCTION
Due to their robust feature representation capacity, deep neural network models, such as convolutional neural networks (CNNs) and transformers, have been successfully applied to various tasks, e.g., image classification (Li et al., 2022c; Krizhevsky et al., 2012; Li et al., 2022a; 2024), object detection (Bochkovskiy et al., 2020), and natural language processing (Vaswani et al., 2017). One important reason is that they generally exploit large training datasets to train massive numbers of network parameters. When data is insufficient, however, they become prone to over-fitting and make overconfident predictions, which may degrade generalization performance on test examples.
To alleviate these drawbacks, data augmentation (DA) is used to generate samples that improve generalization on downstream target tasks. Mixup (Zhang et al., 2017), a recent DA scheme, has received increasing attention as it can produce virtual mixup examples via a simple convex combination of pairs of examples and their labels to effectively train a deep learning (DL) model. DA approaches (Li et al., 2021; Shorten & Khoshgoftaar, 2019; Cubuk et al., 2018; 2020; Fang et al., 2020; Ren et al., 2015; Li et al., 2020) proposed for image classification can be broadly split into three categories: 1) Handcrafted mixup augmentation approaches, where patches from one image are randomly cut and pasted onto another. The ground truth label of the latter is mixed with the label of the former proportionally to the area of the replaced patches. Representative approaches include CutMix (Yun et al., 2019), Cutout (DeVries & Taylor, 2017), ManifoldMixup (Verma et al., 2019), and ResizeMix (Qin et al., 2020). CutMix and ResizeMix, as shown in Fig. 1, generate mixup samples by randomly replacing a patch in an image with patches from another; 2) Saliency-guided mixup augmentation approaches that generate, based on image saliency maps, high-quality samples by preserving regions of maximum saliency. Representative approaches (Uddin et al., 2020; Walawalkar et al., 2020; Kim et al., 2020; Park et al., 2021; Liu et al., 2022c) learn the optimal mixing policy by maximizing the saliency regions; 3) Automatic mixup augmentation approaches that learn a model, e.g. a DL model, instead of a policy, to automatically generate mixed images. For example, AutoMix (Liu et al., 2022d) consists of a target classifier and a generative network and automatically generates mixed samples to train a robust classifier by alternately optimizing the target classifier and the generative network.

Figure 1: Mixed images of various approaches. (a) Accuracy of ResNet18 trained by different mixup approaches with 200 epochs on CIFAR100. (b) Mixed images of various mixup-based approaches (source images Sample A and Sample B, mixed by MixUp, CutMix, FMix, GridMix, ResizeMix, SmoothMix, AttentiveMix, PuzzleMix, AutoMix, and AdAutoMix).
The handcrafted mixup augmentation approaches, however, randomly mix images without considering their contexts and labels. The target objects may therefore be missing from the mixed images, resulting in a label mismatch problem. Saliency-guided mixup augmentation methods can alleviate this problem, as the images are combined using supervising information, namely the maximum saliency region. These mixup models, covering the first two categories above, share the same learning paradigm: an augmented training dataset generated by a random or learnable mixing policy, and a DL model for image classification. As image generation is not directly related to the target task, i.e., classification, the generated images guided by human prior knowledge, i.e., saliency, may not be effective for target network training. Moreover, it is impossible to generate all possible mixed instances for target training. The randomly selected synthesized samples thus may not be representative of the classification task, ultimately degrading classifier generalization. Besides, such generated samples are input to the target network repeatedly, resulting in inevitable overfitting over long training schedules. To overcome these problems, automatic mixup augmentation approaches generate augmented images with a sub-network at a good complexity-accuracy trade-off. This approach comprises two sub-tasks, a mixed sample generation module and a classification module, jointly optimized by minimizing the classification loss in an end-to-end way. As the optimization goal is shared by the two sub-tasks, however, the generation module may not be effectively guided and may consequently produce simple mixed samples to achieve this goal, which limits sample diversification. The classifier trained on such simple examples is therefore prone to overfitting, leading to poor generalization performance on the testing set. Another limitation is that current automatic mixup approaches mix only two images for image generation, so the rich and discriminating information is not efficiently exploited.
To solve these problems, we propose in this paper AdAutoMixup, an adversarial automatic mixup augmentation approach that automatically generates mixed samples with adversarial training in an end-to-end way, as shown in Fig. 2. First, an attention-based generator is investigated to dynamically learn discriminating pixels from a sample pair associated with the corresponding mixed labels. Second, we combine the attention-based generator with the target classifier to build an adversarial network, where the generator and the classifier are alternately updated by adversarial training. Unlike in AutoMix (Liu et al., 2022d), the generator is trained to increase the training loss of the target network by generating adversarial samples, while the classifier learns more robust features from hard examples to improve generalization. Furthermore, any set of images, instead of only two images, can be taken as input to our generator for mixed image generation, which results in more diverse mixed samples. Our main contributions are summarized as follows.
(a) We present an online data mixing approach based on an adversarial learning policy, trained end-to-end to automatically produce mixed samples.
(b) We propose an adversarial framework to jointly optimize the target network training and the mixup sample generator. The generator aims to produce hard samples to increase the target network loss while the target network, trained on such hard samples, learns a robust representation to improve classification. To avoid the collapse of the inherent meanings of images, we apply an exponential moving average (EMA) and cosine similarity to reduce the search space.
(c) We explore an attention-based mixed sample generator that can combine multiple samples instead of only two to generate mixed samples. The generator is flexible, as its architecture does not change as the number of input images increases.

# 2 RELATED WORK
Hand-crafted mixup augmentation Mixup (Zhang et al., 2017), the first hybrid data augmentation method, generates mixed samples by convexly combining any two samples and their one-hot labels. ManifoldMixup (Verma et al., 2019) extended this mixup from input space to feature space. To exploit spatial locality, CutMix (Yun et al., 2019) crops out a region and replaces it with a patch of another image. To improve MixUp and CutMix, FMix (Harris et al., 2020) uses random binary masks obtained by applying a threshold to low-frequency images sampled from the frequency space. RecursiveMix (Yang et al., 2022) iteratively resizes the input image patch from the previous iteration and pastes it into the current patch. To solve the strong "edge" problem caused by CutMix, SmoothMix (Jeong et al., 2021) blends mixed images based on soft edges, with the training labels computed accordingly.
Saliency-guided mixup augmentation SaliencyMix (Uddin et al., 2020), SnapMix (Huang et al., 2020) and Attentive-CutMix (Walawalkar et al., 2020) generate mixed images based on the salient region detected by Class Activation Mapping (CAM) (Selvaraju et al., 2019) or a saliency detector. Similarly, PuzzleMix (Kim et al., 2020) and Co-Mixup (Kim et al., 2021) propose an optimization strategy to obtain the optimal mask by maximizing the sample saliency region. These approaches, however, suffer from a lack of sample diversification as they always deterministically select regions with maximum saliency. To solve this problem, Saliency Grafting (Park et al., 2021) scales and thresholds the saliency map to ensure that all salient regions are considered, increasing sample diversity. Inspired by the success of ViT (Dosovitskiy et al., 2021; Liu et al., 2021) in computer vision, adaptive mixing policies based on attentive maps, e.g., TransMix (Chen et al., 2021), TokenMix (Liu et al., 2022a), TokenMixup (Choi et al., 2022), MixPro (Zhao et al., 2023), and SMMix (Chen et al., 2022), were proposed to generate mixed images.
Automatic mixup augmentation Mixup approaches in the first two categories face a trade-off between precise mixing policies and optimization complexity, as the image mixing task is not directly related to the target classification task during training. To solve this problem, AutoMix (Liu et al., 2022d) divides mixup classification into two sub-tasks, mixed sample generation and mixup classification, and proposes an automatic mixup framework where the two sub-tasks are optimized jointly, instead of independently. During training, the generator continuously produces the mixed samples while the target classifier is preserved for classification. In recent years, adversarial data augmentation (Zhao et al., 2020) and generative adversarial networks (Antoniou et al., 2017) were proposed to automatically generate images for data augmentation. To solve the domain shift problem, adversarial MixUp (Zhang et al., 2023; Xu et al., 2019) has been investigated to synthesize mixed samples or features for domain adaptation. Although there are still very few works on automatic mixup, we expect it to become a research trend.

# 3 ADAUTOMIX
In this section, we present the implementation of AdAutoMix, which is composed of a target classifier and a generator, as shown in Fig. 2. First, we introduce the mixup classification problem and define the loss functions. Then, we detail our attention-based generator, which dynamically learns the augmentation mask policy for image generation. Finally, we show how the target classifier and the generator are jointly optimized in an end-to-end way.

# 3.1 DEEP LEARNING-BASED CLASSIFIERS
Assume that $\mathbb{S} = \{x_s|s = 1,2,\dots,S\}$ is a training set, where $S$ is the number of images. We select any $N$ samples from $\mathbb{S}$ to obtain a sample set $\mathbb{X} = \{x_1,x_2,\dots,x_N\}$, with $\mathbb{Y} = \{y_1,y_2,\dots,y_N\}$ its corresponding label set. Let $\psi_W$ be any feature extraction model, e.g., ResNet (He et al., 2016), where $W$ is a trainable weight vector. The classifier maps example $x\in \mathbb{X}$ to label $y\in \mathbb{Y}$. A DL classifier $\psi_W$ is implemented to predict the posterior class probability, and $W$ is learned by minimizing the classification loss, i.e. the cross-entropy (CE) loss in Eq.(1):

$$
\mathrm{L}_{\mathrm{ce}}(\psi_W, y) = -y \log(\psi_W(x)). \tag{1}
$$
Figure 2: Illustration of AdAutoMix framework. AdAutoMix consists of a generator module and a target module, which are alternatively trained end-to-end. The generator module aims to produce hard samples to challenge the target network while the target network, trained on such hard samples, learns a robust feature representation for classification.
For the $N$ samples in sample set $\mathbb{X}$, the average cross-entropy (ACE) loss is computed by Eq.(2):

$$
\mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y}) = \sum_{n=1}^{N} \left( \mathrm{L}_{\mathrm{ce}}(\psi_W(x_n), y_n) * \lambda_n \right). \tag{2}
$$
where $*$ is scalar multiplication. In the mixup classification task, we input any $N$ images, together with the mixing ratios $\lambda$, to a generator $G_{\theta}(\cdot)$ that outputs a mixed sample $x_{mix}$, as defined in Eq.(8) in Section 3.2. Similarly, the label for such a mixed image $x_{mix}$ is obtained as $y_{mix} = \sum_{n=1}^{N} y_n * \lambda_n$. $\psi_W$ is optimized with the average mixup cross-entropy (AMCE) loss in Eq.(3):

$$
\mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y}) = \mathrm{L}_{\mathrm{ce}}(\psi_W(x_{mix}), y_{mix}). \tag{3}
$$

Similarly, we also compute the mixup cross-entropy (MCE) loss by Eq.(4):

$$
\mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix}) = \mathrm{L}_{\mathrm{ce}}\left(\psi_W\left(\sum_{n=1}^{N} (x_n * \lambda_n)\right), y_{mix}\right). \tag{4}
$$
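As a concrete reading of Eqs. (2)-(4), the sketch below computes the three losses with standard PyTorch primitives; the soft-label cross-entropy helper and tensor conventions are our own illustrative choices, not the paper's released code.

```python
# Illustrative PyTorch sketch of the ACE, AMCE and MCE losses in Eqs. (2)-(4);
# classifier(x) is assumed to return logits, and each label y_n is a one-hot
# (B, num_classes) tensor; xs/ys/lams are length-N lists.
import torch
import torch.nn.functional as F

def soft_ce(logits, soft_target):
    # Cross-entropy against a (possibly mixed) soft label.
    return -(soft_target * F.log_softmax(logits, dim=1)).sum(dim=1).mean()

def ace_loss(classifier, xs, ys, lams):
    # Eq. (2): lambda-weighted sum of per-image cross-entropies.
    return sum(lam * soft_ce(classifier(x), y) for x, y, lam in zip(xs, ys, lams))

def amce_loss(classifier, x_mix, ys, lams):
    # Eq. (3): cross-entropy of the generated mixed image vs. the mixed label.
    y_mix = sum(lam * y for y, lam in zip(ys, lams))
    return soft_ce(classifier(x_mix), y_mix)

def mce_loss(classifier, xs, ys, lams):
    # Eq. (4): cross-entropy of the *linearly* mixed input (Mixup-style).
    x_lin = sum(lam * x for x, lam in zip(xs, lams))
    y_mix = sum(lam * y for y, lam in zip(ys, lams))
    return soft_ce(classifier(x_lin), y_mix)
```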
# 3.2 GENERATOR
Figure 3: Mixed module: the cross attention block (CAB), used to learn the policy matrix for each image, is combined with the values $v_i$ $(i = 1,2,\dots,N)$ to compute the policy matrix for image mixing.

As described in Section 2, most existing approaches mix two samples by manually designed policies or automatically learned policies, which results in insufficient exploitation of the supervising information that the training samples might provide for data augmentation. In our work, we present a universal generation framework that extends two-image mixing to multiple-image mixing. To learn a robust mixing policy matrix, we leverage a self-attention mechanism and propose an attention-based mixed sample generator, as shown in Fig. 3. As described in Section 3.1, $\mathbb{X} = \{x_{n}|n = 1,2,\dots,N\}$ is a sample set with $N$ original training samples and $\mathbb{Y} = \{y_n|n = 1,2,\dots,N\}$ are the corresponding labels. We define $\lambda = \{\lambda_1,\lambda_2,\dots,\lambda_N\}$ as the set of mixing ratios for the images, with their sum constrained to equal 1. As shown in Fig. 3, each image in an image set is first mapped to a feature map with encoder $E_{\phi}$, which is updated by an exponential moving average of the target classifier, i.e. $\widehat{\phi} = \xi \widehat{\phi} + (1 - \xi)W'$, where $W'$ is a partial weight of the target classifier. In our experiments, existing classifiers, ResNet18, ResNet34, and ResNeXt50, are used as target classifiers, and $W'$ is the weight vector of the first three layers of the target classifier. Then, the mixing ratios are embedded into the resulting feature map to enable the generator to learn mask policies for image mixing. For example, given the $n$-th image $x_{n} \in R^{W \times H}$, where $W$ and $H$ represent the image size, we input it to the encoder and take the outputs of its $l$-th layer as feature map $z_{n}^{l} \in R^{C \times w \times h}$, where $C$ is the number of channels, and $w$ and $h$ represent the map size. Then, we build a matrix of size $w \times h$ with all values equal to 1, multiplied by the corresponding ratio $\lambda_{n}$, to obtain the embedding matrix $M_{\lambda_{n}}$. We embed $\lambda_{n}$ with the $l$-th feature map in a simple and efficient way by concatenation, $z_{\lambda_{n}}^{l} = \text{concat}(M_{\lambda_{n}}, z_{n}^{l}) \in R^{(C + 1) \times w \times h}$. The embedding feature map $z_{\lambda_{n}}^{l}$ is mapped to three embedding vectors by three CNNs with $1 \times 1$ kernels (as shown in Fig. 3). We thus obtain three vectors $q_{n}, k_{n}$, and $v_{n}$ for the $n$-th image $x_{n}$. Note that the channel number is reduced to half for $q_{n}$ and $k_{n}$ to save computation time, and is set to 1 for $v_{n}$. In this way, the embedding vectors of all images are computed and denoted by $q_{1}, q_{2}, \dots, q_{N}$, $k_{1}, k_{2}, \dots, k_{N}$, and $v_{1}, v_{2}, \dots, v_{N}$. The cross attention block (CAB) (as shown in Fig. 3) for the $n$-th image is computed by Eq. (5):

$$
P_n = \operatorname{Softmax}\left(\frac{\sum_{i=1, i\neq n}^{N} q_n^{T} k_i}{\sqrt{d}}\right) v_n, \tag{5}
$$
where $d$ is a normalization term. We concatenate the $N$ attention matrices by Eq. (6):

$$
P = \operatorname{Softmax}\left(\operatorname{Concat}(P_1, P_2, \dots, P_N)\right). \tag{6}
$$
The matrix $P \in R^{N \times wh \times wh}$ is resized to $P' \in R^{N \times W \times H}$ by upsampling. We split $N$ matrices $P_1', P_2', \ldots, P_N'$ from $P'$, treated as mask policy matrices, to mix the images in sample set $\mathbb{X}$ by Eq.(7):

$$
x_{mix} = \sum_{n=1}^{N} x_n \odot P_n^{\prime}, \tag{7}
$$
where $\odot$ denotes the Hadamard product. To simplify notation, the mixed image generation procedure is denoted as a generator $G_{\theta}$ by Eq.(8):

$$
x_{mix} = G_{\theta}(\mathbb{X}, \lambda), \tag{8}
$$

where $\theta$ represents all the learnable parameters of the generator.
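A stripped-down sketch of the mixing step in Eqs. (5)-(7) follows: per-image mask logits are produced by cross-attention between the $\lambda$-conditioned feature maps, softmax-normalized across images, upsampled, and used as pixel-wise mixing weights. The module name and layer sizes are our own placeholders; the encoder features are assumed to be precomputed.

```python
# Simplified sketch of the attention-based generator (Eqs. 5-7); feats is a
# list of N tensors (B, C, h, w) from the EMA encoder, images is a list of N
# tensors (B, 3, H, W), and lams is a list of N Python floats summing to 1.
import torch
import torch.nn as nn
import torch.nn.functional as F

class MixBlock(nn.Module):
    def __init__(self, c):
        super().__init__()
        self.q = nn.Conv2d(c + 1, c // 2, 1)  # +1 channel for the lambda map
        self.k = nn.Conv2d(c + 1, c // 2, 1)
        self.v = nn.Conv2d(c + 1, 1, 1)

    def forward(self, feats, images, lams):
        B, C, h, w = feats[0].shape
        H, W = images[0].shape[-2:]
        # Embed each mixing ratio as a constant map and concatenate (Sec. 3.2).
        z = [torch.cat([f, torch.full((B, 1, h, w), l, device=f.device)], 1)
             for f, l in zip(feats, lams)]
        q = [self.q(zi).flatten(2) for zi in z]      # (B, C/2, hw)
        k = [self.k(zi).flatten(2) for zi in z]
        v = [self.v(zi).flatten(2) for zi in z]      # (B, 1, hw)
        d = q[0].shape[1] ** 0.5
        masks = []
        for n in range(len(z)):
            # Eq. (5): attend from image n to all the other images.
            attn = sum(torch.bmm(q[n].transpose(1, 2), k[i]) / d
                       for i in range(len(z)) if i != n)          # (B, hw, hw)
            masks.append(torch.bmm(attn.softmax(-1),
                                   v[n].transpose(1, 2)))         # (B, hw, 1)
        # Eq. (6): normalize the N masks against each other, then upsample.
        P = torch.stack(masks, 1).softmax(1).view(B, len(z), 1, h, w)
        P = F.interpolate(P.flatten(0, 1), size=(H, W), mode='bilinear',
                          align_corners=False).view(B, len(z), 1, H, W)
        # Eq. (7): pixel-wise convex combination of the input images.
        return sum(P[:, n] * images[n] for n in range(len(z)))
```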
# 3.3 ADVERSARIAL AUGMENTATION
This section presents the adversarial framework we propose to jointly optimize the target network $\psi_W$ and the generator $G_{\theta}$ through adversarial learning. Concretely, the generator $G_{\theta}$ attempts to produce an augmented mixed image set that increases the loss of the target network $\psi_W$, while the target network $\psi_W$ aims to minimize the classification loss. An equilibrium can be reached where the learned representation achieves maximal performance.
# 3.3.1 ADVERSARIAL LOSS
As shown in Eq.(8), the generator takes $\mathbb{X}$ and the set of mixing ratios $\lambda$ as input and outputs a synthesized image $x_{mix}$ to challenge the target classifier. The latter receives either a real image or a synthesized image from the generator as input and predicts its probability of belonging to each class. The adversarial loss is defined by the following minimax problem, which trains both players, Eq.(9):

$$
W^{*}, \theta^{*} = \underset{W}{\operatorname{argmin}}\, \underset{\theta}{\max} \left[ \underset{\mathbb{X} \in \mathbb{S}}{\mathbb{E}} \left[ \mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y}) \right] \right], \tag{9}
$$
where $\mathbb{S}$ and $\mathbb{X}$ are the training set and image set, respectively. A robust classifier should correctly classify not only the mixed images but also the original ones, so we incorporate two regularization terms, $\mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix})$ and $\mathrm{L}_{\mathrm{ace}}(\psi_W,\mathbb{Y})$, to enhance performance. Accordingly, the objective function is rewritten as shown by Eq.(10):

$$
W^{*}, \theta^{*} = \underset{W}{\operatorname{argmin}}\, \underset{\theta}{\max} \left[ \underset{\mathbb{X} \in \mathbb{S}}{\mathbb{E}} \left[ \mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y}) + \alpha \mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix}) + (1-\alpha) \mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y}) \right] \right]. \tag{10}
$$
To optimize parameter $\theta$ , $G_{\theta}(\cdot)$ produces images with given image sets to challenge the classifier. It is possible, therefore, that the inherent meanings of images (i.e. their semantic meaning) collapse.
To tackle this issue, we introduce cosine similarity and a teacher model as two regularization terms to control the quality of mixed images. The loss changes accordingly, as shown by Eq.(11):

$$
\begin{aligned}
W^{*}, \theta^{*} = \underset{W}{\operatorname{argmin}}\, \underset{\theta}{\max} \Big[ \underset{\mathbb{X} \in \mathbb{S}}{\mathbb{E}} \big[ & \mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y}) + \alpha \mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix}) + (1-\alpha) \mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y}) \\
& - \beta \mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}}, \mathbb{Y}) + (1-\beta) \mathrm{L}_{\mathrm{cosine}} \big] \Big],
\end{aligned} \tag{11}
$$
where $\mathrm{L}_{\mathrm{cosine}} = \sum_{n=1}^{N} \mathrm{cosine}(\psi_{\widehat{W}}(x_{mix}), \psi_{\widehat{W}}(x_n)) * \lambda_n$, $\mathrm{cosine}(\cdot)$ is the cosine similarity function, and $\psi_{\widehat{W}}$ is a teacher model whose weights are updated as an exponential moving average (EMA) of the target model's weights, i.e. $\widehat{W} \gets \xi \widehat{W} + (1 - \xi)W$. Notice that $\mathrm{L}_{\mathrm{ce}}(\psi_W, y)$ is the standard cross-entropy loss. The $\mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y})$ loss helps the backbone provide a stable feature map at an early stage, which speeds up convergence. The target loss $\mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y})$ aims to learn task-relevant information from the generated mixed samples. $\mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix})$ facilitates the capture of task-relevant information in the linearly mixed samples. $\mathrm{L}_{\mathrm{cosine}}$ and $\mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}}, \mathbb{Y})$ are used to control the quality of the generated mixed images.
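The EMA update $\widehat{W} \gets \xi \widehat{W} + (1-\xi)W$ is the standard momentum-teacher rule; a minimal sketch follows, with the function name and default $\xi$ as our own illustrative choices.

```python
# Sketch of the EMA update used for both the teacher classifier and the
# generator's encoder; xi is the momentum coefficient (values near 1 are typical).
import torch

@torch.no_grad()
def ema_update(teacher, student, xi=0.999):
    for t_param, s_param in zip(teacher.parameters(), student.parameters()):
        # t <- xi * t + (1 - xi) * s
        t_param.mul_(xi).add_(s_param, alpha=1.0 - xi)
```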
# 3.4 ADVERSARIAL OPTIMIZATION
As with many existing adversarial training algorithms, it is hard to directly find a saddle point $(W^{*},\theta^{*})$ solving the minimax problem in Eq.(11). Instead, a pair of gradient descent and ascent steps is employed alternately to update the target network and the generator.
Consider the target classifier $\psi_W(\cdot)$ with loss function $\mathrm{L}_{\mathrm{ce}}(\cdot)$, where the trained generator $G_{\theta}(\cdot)$ maps multiple original samples to a mixed sample. The learning process of the target network can be defined as the minimization problem in Eq.(12):

$$
\begin{aligned}
W^{*} = \operatorname*{argmin}_{W} \Big[ \underset{\mathbb{X} \in \mathbb{S}}{\mathbb{E}} \big[ & \mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y}) + \alpha \mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix}) + (1-\alpha) \mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y}) \\
& - \beta \mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}}, \mathbb{Y}) + (1-\beta) \mathrm{L}_{\mathrm{cosine}} \big] \Big].
\end{aligned} \tag{12}
$$
The problem in Eq. (12) is usually solved by vanilla SGD with a learning rate of $\delta$ and a batch size of $B$, and the training procedure for each batch can be computed by Eq.(13):

$$
\begin{aligned}
W(t+1) = W(t) - \delta \nabla_{W} \frac{1}{K} \sum_{k=1}^{K} \big[ & \mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y}) + \alpha \mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix}) + (1-\alpha) \mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y}) \\
& - \beta \mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}}, \mathbb{Y}) + (1-\beta) \mathrm{L}_{\mathrm{cosine}} \big],
\end{aligned} \tag{13}
$$
where $K$ is the number of mixed images or image sets produced from batch $B$. As the cosine similarity and the teacher model are independent of $W$, Eq.(13) can be rewritten as Eq.(14):

$$
W(t+1) = W(t) - \delta \nabla_{W} \frac{1}{K} \sum_{k=1}^{K} \left[ \mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y}) + \alpha \mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix}) + (1-\alpha) \mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y}) \right]. \tag{14}
$$
Note that the training procedure can be regarded as an average over $K$ instances of gradient computation, which reduces gradient variance and accelerates the convergence of the target network. However, training may easily suffer from over-fitting on limited training data over a long training schedule. To overcome this problem, and unlike AutoMix (Liu et al., 2022d), our mixup augmentation generator generates a set of harder mixed samples to increase the loss of the target classifier, which results in a minimax problem for self-training the network. Such a self-supervised objective may be sufficiently challenging to prevent the target classifier from overfitting. Therefore, the objective is defined as the following maximization problem in Eq.(15):

$$
\theta^{*} = \operatorname*{argmax}_{\theta} \left[ \underset{\mathbb{X} \in \mathbb{S}}{\mathbb{E}} \left[ \mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y}) - \beta \mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}}, \mathbb{Y}) + (1-\beta) \mathrm{L}_{\mathrm{cosine}} \right] \right]. \tag{15}
$$
To solve the above problem, we employ gradient ascent to update the parameters with a learning rate of $\gamma$, as defined in Eq.(16):

$$
\theta(t+1) = \theta(t) + \gamma \nabla_{\theta} \frac{1}{K} \sum_{k=1}^{K} \left[ \mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y}) - \beta \mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}}, \mathbb{Y}) + (1-\beta) \mathrm{L}_{\mathrm{cosine}} \right]. \tag{16}
$$
Intuitively, the optimization in Eq.(16) combines two sub-tasks: the maximization of $\mathrm{L}_{\mathrm{amce}}(\psi_W, \mathbb{Y})$ and the minimization of $\beta \mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}},\mathbb{Y}) - (1 - \beta)\mathrm{L}_{\mathrm{cosine}}$. This tends to push the synthesized mixed samples far away from the real samples to increase diversity, while ensuring the synthesized mixed samples remain recognizable to a teacher model and are kept within a constrained similarity to the feature representations of the original images, so as to avoid collapsing the inherent meanings of the images. This scheme enables generating challenging samples by closely tracking the updates of the classifier. We provide some mixed samples in Appendix B.2 and B.3.
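Putting Eqs. (14) and (16) together, the sketch below shows one alternating update; it reuses the loss helpers sketched earlier, and `amce_loss_teacher` and `cosine_reg` are hypothetical stand-ins for the teacher and cosine regularizers, not functions from the paper's code.

```python
# Schematic of the alternating min-max step: the classifier descends on the
# full objective (Eq. 14), then the generator ascends on its own terms (Eq. 16,
# implemented here as descending the negated objective).
def train_step(classifier, generator, opt_cls, opt_gen,
               xs, ys, lams, alpha, beta):
    # (1) Classifier update W (gradient descent, Eq. 14).
    x_mix = generator(xs, lams).detach()  # generator held fixed for this step
    loss_cls = (amce_loss(classifier, x_mix, ys, lams)
                + alpha * mce_loss(classifier, xs, ys, lams)
                + (1 - alpha) * ace_loss(classifier, xs, ys, lams))
    opt_cls.zero_grad(); loss_cls.backward(); opt_cls.step()

    # (2) Generator update theta (gradient ascent, Eq. 16): maximize the
    # classifier loss while the teacher/cosine terms (hypothetical helpers)
    # keep the mixed images semantically recognizable.
    x_mix = generator(xs, lams)
    loss_gen = -(amce_loss(classifier, x_mix, ys, lams)
                 - beta * amce_loss_teacher(x_mix, ys, lams)
                 + (1 - beta) * cosine_reg(x_mix, xs, lams))
    opt_gen.zero_grad(); loss_gen.backward(); opt_gen.step()
```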
# 4 EXPERIMENTS
To evaluate the performance of our approach, we conducted extensive experiments on seven classification benchmarks, including CIFAR100 (Krizhevsky et al., 2009), Tiny-ImageNet (Chrabaszcz et al., 2017), ImageNet-1K (Krizhevsky et al., 2012), CUB-200 (Wah et al., 2011), FGVC-Aircraft (Maji et al., 2013) and Standford-Cars (Krause et al., 2013) (Appendix A.1). For a fair assessment, we compare our AdAutoMixup with current mixup methods, i.e. Mixup (Zhang et al., 2017), CutMix (Yun et al., 2019), ManifoldMix (Verma et al., 2019), FMix (Harris et al., 2020), ResizeMix (Qin et al., 2020), SaliencyMix (Uddin et al., 2020), PuzzleMix (Kim et al., 2020) and AutoMix (Liu et al., 2022d). To verify the generalizability of our approach, six baseline networks, namely ResNet18, ResNet34, ResNet50 (He et al., 2016), ResNeXt50 (Xie et al., 2017), Swin Transformer (Liu et al., 2021) and ConvNeXt (Liu et al., 2022b), are used to compute classification accuracy. We implemented our algorithm on the open-source library OpenMixup (Li et al., 2022b). Common parameters follow the experimental settings of AutoMix, and we provide our own hyperparameters in Appendix A.2. For all classification results, we report the mean performance of 3 trials, where the median top-1 test accuracy over the last 10 training epochs is recorded for each trial. To facilitate comparison, we mark the best and second-best results in bold and cyan.
# 4.1 CLASSIFICATION RESULTS

# 4.1.1 DATASET CLASSIFICATION
We first train ResNet18 and ResNeXt50 on CIFAR100 for 800 epochs with the following experimental settings: a basic learning rate of 0.1, dynamically adjusted by a cosine scheduler; an SGD (Loshchilov & Hutter, 2016) optimizer with momentum of 0.9 and weight decay of 0.0001; and a batch size of 100. For ViT-based models, e.g. Swin-Tiny Transformer and ConvNeXt-Tiny, we train with the AdamW (Loshchilov & Hutter, 2019) optimizer with a weight decay of 0.05, a batch size of 100, and 200 epochs. On Tiny-ImageNet, except for a learning rate of 0.2 and training over 400 epochs, the settings are the same as on CIFAR100. On ImageNet-1K, we train ResNet18, ResNet34 and ResNet50 for 100 epochs using PyTorch-style settings. Implementation details are provided in Appendix A.3.
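For readers reproducing the CIFAR100 setup, a minimal optimizer/scheduler sketch matching the stated hyperparameters is given below; this mirrors the text, not the released OpenMixup config files, and the function name is our own.

```python
# Sketch of the stated CIFAR100 optimization setup: SGD with momentum 0.9,
# weight decay 1e-4, base lr 0.1 decayed by a cosine schedule over 800 epochs.
import torch

def make_optimizer(model, epochs=800, base_lr=0.1):
    opt = torch.optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=1e-4)
    sched = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=epochs)
    return opt, sched
```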
Table 1 and Fig. 1 show that our method outperforms existing approaches on CIFAR100. After training with our approach, ResNet18 and ResNeXt50 achieve accuracy improvements of $0.28\%$ and $0.58\%$ over the second-best results, respectively. Similarly, the ViT-based models achieve the highest classification accuracies of $84.33\%$ and $83.54\%$, outperforming the previous best approaches by $1.66\%$ and $0.24\%$. On Tiny-ImageNet, our AdAutoMix consistently outperforms existing approaches on ResNet18 and ResNeXt50, with significant improvements of $1.86\%$ and $2.17\%$ over the second-best approaches. Table 1 also shows that AdAutoMix achieves accuracy improvements on the large-scale ImageNet-1K dataset ($0.36\%$ for ResNet18, $0.3\%$ for ResNet34, and $0.13\%$ for ResNet50).
Table 1: Top-1 accuracy $(\%)\uparrow$ of mixup methods on CIFAR-100, Tiny-ImageNet and ImageNet-1K.
<table><tr><td rowspan="2">Method</td><td colspan="2">CIFAR100</td><td colspan="2">CIFAR100</td><td colspan="2">Tiny-ImageNet</td><td colspan="3">ImageNet-1K</td></tr><tr><td>ResNet18</td><td>ResNeXt50</td><td>Swin-T</td><td>ConvNeXt-T</td><td>ResNet18</td><td>ResNeXt50</td><td>ResNet18</td><td>ResNet34</td><td>ResNet50</td></tr><tr><td>Vanilla</td><td>78.04</td><td>81.09</td><td>78.41</td><td>78.70</td><td>61.68</td><td>65.04</td><td>70.04</td><td>73.85</td><td>76.83</td></tr><tr><td>MixUp</td><td>79.12</td><td>82.10</td><td>76.78</td><td>81.13</td><td>63.86</td><td>66.36</td><td>69.98</td><td>73.97</td><td>77.12</td></tr><tr><td>CutMix</td><td>78.17</td><td>81.67</td><td>80.64</td><td>82.46</td><td>65.53</td><td>66.47</td><td>68.95</td><td>73.58</td><td>77.17</td></tr><tr><td>SaliencyMix</td><td>79.12</td><td>81.53</td><td>80.40</td><td>82.82</td><td>64.60</td><td>66.55</td><td>69.16</td><td>73.56</td><td>77.14</td></tr><tr><td>FMix</td><td>79.69</td><td>81.90</td><td>80.72</td><td>81.79</td><td>63.47</td><td>65.08</td><td>69.96</td><td>74.08</td><td>77.19</td></tr><tr><td>PuzzleMix</td><td>81.13</td><td>82.85</td><td>80.33</td><td>82.29</td><td>65.81</td><td>67.83</td><td>70.12</td><td>74.26</td><td>77.54</td></tr><tr><td>ResizeMix</td><td>80.01</td><td>81.82</td><td>80.16</td><td>82.53</td><td>63.74</td><td>65.87</td><td>69.50</td><td>73.88</td><td>77.42</td></tr><tr><td>AutoMix</td><td>82.04</td><td>83.64</td><td>82.67</td><td>83.30</td><td>67.33</td><td>70.72</td><td>70.50</td><td>74.52</td><td>77.91</td></tr><tr><td>AdAutoMix</td><td>82.32</td><td>84.22</td><td>84.33</td><td>83.54</td><td>69.19</td><td>72.89</td><td>70.86</td><td>74.82</td><td>78.04</td></tr><tr><td>Gain</td><td>+0.28</td><td>+0.58</td><td>+1.66</td><td>+0.24</td><td>+1.86</td><td>+2.17</td><td>+0.36</td><td>+0.30</td><td>+0.13</td></tr></table>
Table 2: Accuracy $(\%)\uparrow$ of mixup approaches on CUB-200, FGVC-Aircrafts and Standford-Cars.
<table><tr><td rowspan="2">Method</td><td colspan="2">CUB-200</td><td colspan="2">FGVC-Aircrafts</td><td colspan="2">Standford-Cars</td></tr><tr><td>ResNet18</td><td>ResNet50</td><td>ResNet18</td><td>ResNeXt50</td><td>ResNet18</td><td>ResNeXt50</td></tr><tr><td>Vanilla</td><td>77.68</td><td>82.38</td><td>80.23</td><td>85.10</td><td>86.32</td><td>90.15</td></tr><tr><td>MixUp</td><td>78.39</td><td>82.98</td><td>79.52</td><td>85.18</td><td>86.27</td><td>90.81</td></tr><tr><td>CutMix</td><td>78.40</td><td>83.17</td><td>78.84</td><td>84.55</td><td>87.48</td><td>91.22</td></tr><tr><td>ManifoldMix</td><td>79.76</td><td>83.76</td><td>80.68</td><td>86.60</td><td>85.88</td><td>90.20</td></tr><tr><td>SaliencyMix</td><td>77.95</td><td>81.71</td><td>80.02</td><td>84.31</td><td>86.48</td><td>90.60</td></tr><tr><td>FMix</td><td>77.28</td><td>83.34</td><td>79.36</td><td>86.23</td><td>87.55</td><td>90.90</td></tr><tr><td>PuzzleMix</td><td>78.63</td><td>83.83</td><td>80.76</td><td>86.23</td><td>87.78</td><td>91.29</td></tr><tr><td>ResizeMix</td><td>78.50</td><td>83.41</td><td>78.10</td><td>84.08</td><td>88.17</td><td>91.36</td></tr><tr><td>AutoMix</td><td>79.87</td><td>83.88</td><td>81.37</td><td>86.72</td><td>88.89</td><td>91.38</td></tr><tr><td>AdAutoMix</td><td>80.88</td><td>84.57</td><td>81.73</td><td>87.16</td><td>89.19</td><td>91.59</td></tr><tr><td>Gain</td><td>+1.01</td><td>+0.69</td><td>+0.36</td><td>+0.44</td><td>+0.30</td><td>+0.21</td></tr></table>
# 4.1.2 FINE-GRAINED CLASSIFICATION
On CUB-200, FGVC-Aircrafts, and Standford-Cars, we fine-tune pretrained ResNet18, ResNet50, and ResNeXt50 using an SGD optimizer with momentum of 0.9, weight decay of 0.0005, batch size of 16, 200 epochs, and a learning rate of 0.001 dynamically adjusted by a cosine scheduler. The results in Table 2 show that AdAutoMix achieves the best performance and significantly improves over the vanilla baselines (3.20%/2.19% on CUB-200, 1.5%/2.06% on Aircraft and 2.87%/1.44% on Cars), which implies that AdAutoMix is also robust in more challenging scenarios.
# 4.2 CALIBRATION
DNNs are prone to overconfidence in classification tasks. Mixup methods can effectively alleviate this problem. To this end, we compute the expected calibration error (ECE) of various mixup approaches on the CIFAR100 dataset. The experimental results in Fig. 4 show that our method achieves the lowest ECE, i.e. $3.2\%$, compared with existing approaches. We provide more experimental results in Table 6 in Appendix A.5.
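For reference, the ECE metric used here is the standard confidence-binned calibration gap; a minimal sketch follows (bin count is our own default).

```python
# Sketch of expected calibration error (ECE): bin test predictions by
# confidence and average the |accuracy - confidence| gap, weighted by bin size.
import numpy as np

def expected_calibration_error(confidences, correct, n_bins=15):
    confidences, correct = np.asarray(confidences), np.asarray(correct)
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for lo, hi in zip(edges[:-1], edges[1:]):
        in_bin = (confidences > lo) & (confidences <= hi)
        if in_bin.any():
            gap = abs(correct[in_bin].mean() - confidences[in_bin].mean())
            ece += in_bin.mean() * gap
    return ece
```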
Figure 4: Calibration plots of mixup variants on CIFAR100 using ResNet18.
# 4.3 ROBUSTNESS
We carried out experiments on CIFAR100-C (Hendrycks & Dietterich, 2019) to verify robustness against corruption. The corrupted dataset is generated to include 19 different corruption types (noise, blur, fog, brightness, etc.). We compare our AdAutoMix with several popular mixup algorithms: CutMix, FMix, PuzzleMix, and AutoMix. Table 4 shows that our approach achieves the highest recognition accuracy on both clean and corrupted data, i.e. $1.53\%$ and $0.40\%$ classification accuracy improvements over AutoMix. We further investigate robustness against the FGSM (Goodfellow et al., 2015) white-box attack with an $8/255$ $\ell_{\infty}$ epsilon ball, following (Zhang et al., 2017). Our AdAutoMix significantly outperforms existing methods, as shown in Table 4.
# 4.4 OCCLUSION ROBUSTNESS
To analyze the robustness of AdAutoMix against random occlusion (Naseer et al., 2021), we build image sets by randomly masking images from CIFAR100 and CUB200 with $16 \times 16$ patches, using different mask ratios (0 to $100\%$). We input the resulting occluded images into two classifiers, Swin-Tiny Transformer and ResNet-50, trained by various mixup models, and compute test accuracy. From the results in Fig. 5 and Table 7 in Appendix A.6, we observe that AdAutoMix achieves the highest accuracy across the different occlusion ratios.
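The occlusion test itself is easy to reproduce; a minimal sketch follows, with the function name and zero-fill masking as our own illustrative assumptions.

```python
# Sketch of the occlusion test: zero out a random fraction of 16x16 patches
# of each image before classification; mask_ratio sweeps 0 to 1 as in Fig. 5.
import torch

def occlude(images, mask_ratio, patch=16):
    B, C, H, W = images.shape
    gh, gw = H // patch, W // patch
    keep = torch.rand(B, gh * gw, device=images.device) >= mask_ratio
    mask = keep.float().view(B, 1, gh, gw)
    mask = mask.repeat_interleave(patch, dim=2).repeat_interleave(patch, dim=3)
    return images * mask
```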
Figure 5: Robustness against image occlusion with different occlusion ratios.
Table 3: Top-1 accuracy $(\%)\uparrow$ with ResNet50 on CUB200 and Standford-Cars.
<table><tr><td>Dataset</td><td>Vanilla</td><td>MixUp</td><td>CutMix</td><td>PuzzleMix</td><td>AutoMix</td><td>AdAutoMix</td></tr><tr><td>CUB</td><td>81.76</td><td>82.79</td><td>81.67</td><td>82.59</td><td>82.93</td><td>83.36(+0.43)</td></tr><tr><td>Cars</td><td>88.88</td><td>89.45</td><td>88.99</td><td>89.37</td><td>88.71</td><td>89.65(+0.20)</td></tr></table>
# 4.5 TRANSFER LEARNING
We further study the transferable abilities of the features learned by AdAutoMix for downstream classification tasks. The experimental settings in subsection 4.1.2 are used for transfer learning on CUB-200 and Standford-Cars, except for training now over 100 epochs. ResNet50 trained on ImageNet-1K is finetuned on CUB200 and Standford-Cars for classification. Table 3 shows that AdAutoMix achieves the best performance, which proves the efficacy of our approach for downstream tasks.

# 4.6 ABLATION EXPERIMENT

In AdAutoMix, four hyperparameters, namely the number of input images $N$, the weights $\alpha$ and $\beta$, and the mixing ratio $\lambda$, are important for high performance and are fixed in all experiments. To save time, we train a ResNet18 classifier for 200 epochs with our AdAutoMixup. The accuracy of ResNet18 with different $\alpha$, $\beta$, $N$, and $\lambda$ is shown in Fig. 6 (a), (b), (c), and (d). The classification accuracy of AdAutoMixup with different $\lambda$ and $N$ is also reported in Table 9 and Table 10 in Appendix A.8. AdAutoMix with $N = 3$, $\alpha = 0.5$, $\beta = 0.3$, and $\lambda = 1$ as default achieves the best performance on the various datasets. In addition, two regularization terms, $\mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix})$ and $\mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y})$, attempt to improve

Table 4: Top-1 accuracy and FGSM error of ResNet18 compared with other methods.

<table><tr><td>Method</td><td>Clean Acc(%)↑</td><td>Corruption Acc(%)↑</td><td>FGSM Error(%)↓</td></tr><tr><td>CutMix</td><td>79.45</td><td>46.66</td><td>88.24</td></tr><tr><td>FMix</td><td>78.91</td><td>50.58</td><td>88.35</td></tr><tr><td>PuzzleMix</td><td>79.96</td><td>51.04</td><td>80.52</td></tr><tr><td>AutoMix</td><td>80.02</td><td>50.75</td><td>82.67</td></tr><tr><td>AdAutoMix</td><td>81.55</td><td>51.44</td><td>75.66</td></tr></table>

Table 5: Ablation experiments on CIFAR100 based on ResNet18 and ResNeXt50.

<table><tr><td rowspan="2">Method</td><td colspan="2">CIFAR100</td></tr><tr><td>ResNet18</td><td>ResNeXt50</td></tr><tr><td>Base(N=3)</td><td>79.38</td><td>82.84</td></tr><tr><td>+0.5Lmce + 0.5Lace</td><td>80.04</td><td>84.12</td></tr><tr><td>-0.3Lamce + 0.7Lcosine</td><td>81.55</td><td>84.40</td></tr></table>

classifier robustness, and another two regularization terms, namely the cosine similarity $\mathrm{L}_{\mathrm{cosine}}$ and the EMA model loss $\mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}}, \mathbb{Y})$, aim to avoid collapsing the inherent meaning of images in AdAutoMix. We thus carry out experiments to evaluate the contribution of each module to classifier performance. To facilitate the description, we remove the four modules from AdAutoMix and denote the resulting approach as basic AdAutoMix. Then, we gradually incorporate the two modules $\mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix})$ and $\mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y})$, followed by the two modules $\mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}}, \mathbb{Y})$ and $\mathrm{L}_{\mathrm{cosine}}$, and compute the classification accuracy. The experimental results in Table 5 show that $\mathrm{L}_{\mathrm{mce}}(\psi_W, y_{mix})$ and $\mathrm{L}_{\mathrm{ace}}(\psi_W, \mathbb{Y})$ improve classifier accuracy by about 0.66%. After further incorporating $\mathrm{L}_{\mathrm{amce}}(\psi_{\widehat{W}}, \mathbb{Y})$ and $\mathrm{L}_{\mathrm{cosine}}$ to constrain the synthesized mixed images, we observe that the classification accuracy increases significantly, namely a 1.51% accuracy improvement, which implies that these two modules are capable of controlling the quality of the generated images during adversarial training. We also report the accuracy of our approach when gradually adding individual regularization terms in Table 8 in Appendix A.8. There is a similar trend: each regularization term improves accuracy.

Figure 6: Ablation of the hyperparameters $\alpha$, $\beta$, the number of input samples $N$, and $\lambda$ of AdAutoMix on CIFAR100.

# 5 CONCLUSION

In this paper, we have proposed AdAutoMixup, a framework that jointly optimizes the target classifier and the mixed-image generator in an adversarial way. Specifically, the generator produces hard mixed samples to increase the classification loss, while the classifier is trained on the hard samples to improve generalization. In addition, the generator can handle multiple-sample mixing. The experimental results on the six datasets demonstrate the efficacy of our approach.

# ACKNOWLEDGEMENT

This work was supported in part by the Scientific Innovation 2030 Major Project for New Generation of AI under Grant 2020AAA0107300, in part by the National Natural Science Foundation of China (Grant No. 61976030), the Science Fund for Creative Research Groups of the Chongqing University (Grant No. CXQT21034), in part by the National Natural Science Foundation of China (Grant No. 62221005), in part by the National Natural Science Foundation of China (Grant No. U22A2096), in part by the Research on JY human-machine hybrid enhanced intelligence theory and method for command and decision-making (Grant No. 8091B012112), and in part by the Fund of Henan Provincial Science and Technology Department (Grant No. 222102210301). We thank all members who contribute to the OpenMixup community.

# REFERENCES

Antreas Antoniou, Amos J. Storkey, and Harrison Edwards. Data augmentation generative adversarial networks. ArXiv, abs/1711.04340, 2017. 3

Alexey Bochkovskiy, Chien-Yao Wang, and Hong-Yuan Mark Liao. Yolov4: Optimal speed and accuracy of object detection. ArXiv, abs/2004.10934, 2020. 1

Jie-Neng Chen, Shuyang Sun, Ju He, Philip Torr, Alan Yuille, and Song Bai. Transmix: Attend to mix for vision transformers, 2021. 3

Mengzhao Chen, Mingbao Lin, Zhihang Lin, Yuxin Zhang, Fei Chao, and Rongrong Ji. Smmix: Self-motivated image mixing for vision transformers. ArXiv, abs/2212.12977, 2022. 3

Hyeong Kyu Choi, Joonmyung Choi, and Hyunwoo J. Kim. Tokenmixup: Efficient attention-guided token-level data augmentation for transformers. ArXiv, abs/2210.07562, 2022. 3

Patryk Chrabaszcz, Ilya Loshchilov, and Frank Hutter. A downsampled variant of imagenet as an alternative to the cifar datasets. arXiv preprint arXiv:1707.08819, 2017. 7, 14

Ekin D Cubuk, Barret Zoph, Dandelion Mane, Vijay Vasudevan, and Quoc V Le. Autoaugment: Learning augmentation policies from data. arXiv preprint arXiv:1805.09501, 2018. 1

Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 702-703, 2020. 1

Terrance DeVries and Graham W Taylor. Improved regularization of convolutional neural networks with cutout. arXiv preprint arXiv:1708.04552, 2017. 1

Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations (ICLR), 2021. 3

Jiemin Fang, Yuzhu Sun, Kangjian Peng, Qian Zhang, Yuan Li, Wenyu Liu, and Xinggang Wang. Fast neural network adaptation via parameter remapping and architecture search. arXiv preprint arXiv:2001.02525, 2020. 1

Ian J. Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. In International Conference on Learning Representations (ICLR), 2015. 8

Ethan Harris, Antonia Marcu, Matthew Painter, Mahesan Niranjan, Adam Prügel-Bennett, and Jonathon Hare. Fmix: Enhancing mixed sample data augmentation. arXiv preprint arXiv:2002.12047, 2(3):4, 2020. 3, 7

Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770-778, 2016. 3, 7

Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. arXiv preprint arXiv:1903.12261, 2019. 8

Shaoli Huang, Xinchao Wang, and Dacheng Tao. Snapmix: Semantically proportional mixing for augmenting fine-grained data. In AAAI Conference on Artificial Intelligence, 2020. 3

Jongheon Jeong, Sejun Park, Minkyu Kim, Heung-Chang Lee, Do-Guk Kim, and Jinwoo Shin. Smoothmix: Training confidence-calibrated smoothed classifiers for certified robustness. In Neural Information Processing Systems, 2021. 3

Jang-Hyun Kim, Wonho Choo, and Hyun Oh Song. Puzzle mix: Exploiting saliency and local statistics for optimal mixup. In International Conference on Machine Learning, pp. 5275-5285. PMLR, 2020. 1, 3, 7

Jang-Hyun Kim, Wonho Choo, Hosan Jeong, and Hyun Oh Song. Co-mixup: Saliency guided joint mixup with supermodular diversity. arXiv preprint arXiv:2102.03065, 2021. 3

Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In 4th International IEEE Workshop on 3D Representation and Recognition (3dRR-13), 2013. 7, 14

Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 7, 14

Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in Neural Information Processing Systems, pp. 1097-1105, 2012. 1, 7, 14

Siyuan Li, Zicheng Liu, Zedong Wang, Di Wu, Zihan Liu, and Stan Z. Li. Boosting discriminative visual representation learning with scenario-agnostic mixup. ArXiv, abs/2111.15454, 2021. 1

Siyuan Li, Zedong Wang, Zicheng Liu, Cheng Tan, Haitao Lin, Di Wu, Zhiyuan Chen, Jiangbin Zheng, and Stan Z. Li. Moganet: Multi-order gated aggregation network. arXiv preprint arXiv:2211.03295, 2022a. 1

Siyuan Li, Zedong Wang, Zicheng Liu, Di Wu, and Stan Z. Li. Openmixup: Open mixup toolbox and benchmark for visual representation learning. arXiv preprint arXiv:2209.04851, 2022b. 7

Siyuan Li, Di Wu, Fang Wu, Zelin Zang, Kai Wang, Lei Shang, Baigui Sun, Haoyang Li, and Stan Z. Li. Architecture-agnostic masked image modeling - from vit back to cnn. arXiv preprint arXiv:2205.13943, 2022c. 1

Siyuan Li, Weiyang Jin, Zedong Wang, Fang Wu, Zicheng Liu, Cheng Tan, and Stan Z. Li. Semireward: A general reward model for semi-supervised learning. In International Conference on Learning Representations, 2024. 1

Yonggang Li, Guosheng Hu, Yongtao Wang, Timothy Hospedales, Neil M Robertson, and Yongxin Yang. Differentiable automatic data augmentation. In European Conference on Computer Vision, pp. 580-595. Springer, 2020. 1, 16

Jihao Liu, B. Liu, Hang Zhou, Hongsheng Li, and Yu Liu. Tokenmix: Rethinking image mixing for data augmentation in vision transformers. In European Conference on Computer Vision, 2022a. 3

Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In International Conference on Computer Vision (ICCV), 2021. 3, 7

Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s, 2022b. 7

Zicheng Liu, Siyuan Li, Ge Wang, Cheng Tan, Lirong Wu, and Stan Z. Li. Decoupled mixup for data-efficient learning. ArXiv, abs/2203.10761, 2022c. 1

Zicheng Liu, Siyuan Li, Di Wu, Zihan Liu, Zhiyuan Chen, Lirong Wu, and Stan Z. Li. Automix: Unveiling the power of mixup for stronger classifiers. In European Conference on Computer Vision, pp. 441-458. Springer, 2022d. 2, 3, 6, 7

Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 7

Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations (ICLR), 2019. 7

Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013. 7, 14

Muzammal Naseer, Kanchana Ranasinghe, Salman Hameed Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Intriguing properties of vision transformers. In Neural Information Processing Systems, 2021. 8

Joonhyung Park, June Yong Yang, Jinwoo Shin, Sung Ju Hwang, and Eunho Yang. Saliency grafting: Innocuous attribution-guided mixup with calibrated label mixing. In AAAI Conference on Artificial Intelligence, 2021. 1, 3

Jie Qin, Jiemin Fang, Qian Zhang, Wenyu Liu, Xingang Wang, and Xinggang Wang. Resizemix: Mixing data with preserved object information and true labels. arXiv preprint arXiv:2012.11101, 2020. 1, 7

Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. arXiv preprint arXiv:1506.01497, 2015. 1

Ramprasaath R. Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. arXiv preprint arXiv:1610.02391, 2019. 3

Connor Shorten and Taghi M Khoshgoftaar. A survey on image data augmentation for deep learning. Journal of Big Data, 6(1):1-48, 2019. 1

AFM Uddin, Mst Monira, Wheemyung Shin, TaeChoong Chung, Sung-Ho Bae, et al. Saliencymix: A saliency guided data augmentation strategy for better regularization. arXiv preprint arXiv:2006.01791, 2020. 1, 3, 7

Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NIPS, 2017. 1

Vikas Verma, Alex Lamb, Christopher Beckham, Amir Najafi, Ioannis Mitliagkas, David Lopez-Paz, and Yoshua Bengio. Manifold mixup: Better representations by interpolating hidden states. In International Conference on Machine Learning, pp. 6438-6447, 2019. 1, 3, 7

Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. California Institute of Technology, 2011. 7, 14

Devesh Walawalkar, Zhiqiang Shen, Zechun Liu, and Marios Savvides. Attentive cutmix: An enhanced data augmentation approach for deep learning based image classification. In ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3642-3646, 2020. 1, 3

Saining Xie, Ross Girshick, Piotr Dollár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1492-1500, 2017. 7

Minghao Xu, Jian Zhang, Bingbing Ni, Teng Li, Chengjie Wang, Qi Tian, and Wenjun Zhang. Adversarial domain adaptation with domain mixup. In AAAI Conference on Artificial Intelligence, 2019. 3

Lingfeng Yang, Xiang Li, Borui Zhao, Renjie Song, and Jian Yang. Recursivemix: Mixed learning with history. ArXiv, abs/2203.06844, 2022. 3

Sangdoo Yun, Dongyoon Han, Seong Joon Oh, Sanghyuk Chun, Junsuk Choe, and Youngjoon Yoo. Cutmix: Regularization strategy to train strong classifiers with localizable features. In Proceedings of the International Conference on Computer Vision (ICCV), pp. 6023-6032, 2019. 1, 3, 7

Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. arXiv preprint arXiv:1710.09412, 2017. 1, 3, 7, 8, 16

Jiajin Zhang, Hanqing Chao, Amit Dhurandhar, Pin-Yu Chen, Ali Tajer, Yangyang Xu, and Pingkun Yan. Spectral adversarial mixup for few-shot unsupervised domain adaptation. ArXiv, abs/2309.01207, 2023. 3, 16

Long Zhao, Ting Liu, Xi Peng, and Dimitris N. Metaxas. Maximum-entropy adversarial data augmentation for improved generalization and robustness. ArXiv, abs/2010.08001, 2020. 3, 16

QiHao Zhao, Yangyu Huang, Wei Hu, Fan Zhang, and J. Liu. Mixpro: Data augmentation with maskmix and progressive attention labeling for vision transformer. ArXiv, abs/2304.12043, 2023. 3

# A APPENDIX

# A.1 DATASET INFORMATION

We briefly introduce the image datasets used in this paper. (1) CIFAR-100 (Krizhevsky et al., 2009) contains 50,000 training images and 10,000 test images at $32 \times 32$ resolution, with 100 classes. (2) Tiny-ImageNet (Chrabaszcz et al., 2017) contains 100,000 training images and 10,000 validation images of 200 classes at $64 \times 64$ resolution. (3) ImageNet-1K (Krizhevsky et al., 2012) contains 1,281,167 training images and 50,000 validation images of 1000 classes. (4) CUB-200-2011 (Wah et al., 2011) contains 11,788 images of 200 wild bird species. FGVC-Aircraft (Maji et al., 2013) contains 10,000 images of 100 aircraft classes, and Stanford-Cars (Krause et al., 2013) contains 8,144 training images and 8,041 test images of 196 classes.

# A.2 EXPERIMENTS HYPERPARAMETERS DETAILS

In our work, the feature layer $l$ is set to 3, and the momentum coefficient starts from $\xi = 0.999$ and is increased to 1 following a cosine schedule. AdAutoMix uses the same set of hyperparameters in all experiments: $\alpha = 0.5$, $\beta = 0.3$, $\lambda = 1.0$, and $N = 3$ or $N = 2$.
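
A minimal sketch of such a cosine increase of the momentum coefficient, assuming the schedule is evaluated once per training step; the function name and arguments are illustrative, not the authors' code.

```python
import math

def momentum_schedule(step, total_steps, base=0.999, final=1.0):
    """Cosine increase of the EMA momentum from `base` to `final`."""
    # cos term decays from 1 at step 0 to 0 at the final step,
    # so the momentum rises smoothly from `base` to `final`.
    cos = 0.5 * (1.0 + math.cos(math.pi * step / total_steps))
    return final - (final - base) * cos
```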

# A.3 EXPERIMENTS IMPLEMENTATION DETAILS

On CIFAR100, RandomFlip and RandomCrop with 4-pixel padding are used as basic data augmentations for $32 \times 32$ images. For ResNet18 and ResNeXt50, we use the following experimental setting: SGD optimizer with momentum of 0.9 and weight decay of 0.0001, batch size of 100, and training for 800 epochs. The basic learning rate is 0.1, dynamically adjusted by the cosine scheduler; CIFAR versions of the ResNet variants are used, i.e., the $7 \times 7$ convolution and MaxPooling are replaced by a $3 \times 3$ convolution. To train ViT-based approaches, e.g. the Swin-Tiny Transformer, we resize images to $224 \times 224$ and train with the AdamW optimizer, a weight decay of 0.05, a batch size of 100, and 200 training epochs in total. The basic learning rate is 0.0005, dynamically adjusted by the cosine scheduler. For ConvNeXt-Tiny training, the images keep the $32 \times 32$ resolution, and we follow the setting of the ViT-based approaches except for a basic learning rate of 0.002. The $\alpha$ and $\beta$ are set to 0.5 and 0.3 for CIFAR on ResNet18 and ResNeXt50.
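
A hedged PyTorch sketch of this optimizer and scheduler configuration; the torchvision `resnet18` stand-in is not the CIFAR-adapted variant described above and is used only to keep the example short.

```python
import torch
from torchvision.models import resnet18

# Stand-in model; the paper uses a CIFAR-adapted ResNet (3x3 stem, no maxpool).
model = resnet18(num_classes=100)

optimizer = torch.optim.SGD(model.parameters(), lr=0.1,
                            momentum=0.9, weight_decay=1e-4)
# Cosine annealing of the learning rate over the full 800-epoch budget.
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=800)
```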

On Tiny-ImageNet, RandomFlip and RandomResizedCrop to $64 \times 64$ are used as basic data augmentations. Except for a learning rate of 0.2 and training over 400 epochs, the training settings are similar to those used on CIFAR100.

On ImageNet-1K, we use a PyTorch-style training setup, which optimizes the model for 100 epochs with the SGD optimizer, a batch size of 256, a basic learning rate of 0.1, an SGD weight decay of 0.0001, and an SGD momentum of 0.9.

On CUB-200, FGVC-Aircraft, and Stanford-Cars, the official PyTorch models pre-trained on ImageNet-1K are adopted as initialization, using the SGD optimizer with momentum of 0.9, weight decay of 0.0005, batch size of 16, 200 epochs, and a learning rate of 0.001 dynamically adjusted by the cosine scheduler. The $\alpha$ and $\beta$ are set to 0.5 and 0.1.

# A.4 DETAILS OF THE EXPERIMENTS FOR THE OTHER MIXUP METHODS

Detailed experimental settings and results are available at https://github.com/Westlake-AI/openmixup, which also provides the open-source code for most of the compared mixup methods.

# A.5 RESULTS OF CALIBRATION

Table 6: The expected calibration error (ECE) of ResNet18 and Swin-Tiny Transformer (Swin-Tiny) with various Mixup methods trained on CIFAR100 dataset for 200 epochs.

<table><tr><td>Classifiers</td><td>Mixup</td><td>CutMix</td><td>FMix</td><td>GridMix</td><td>PuzzleMix</td><td>AutoMix</td><td>AdAutoMix</td></tr><tr><td>ResNet18</td><td>15.3</td><td>4.4</td><td>8.9</td><td>6.5</td><td>3.7</td><td>3.4</td><td>3.2 (-0.2)</td></tr><tr><td>Swin-Tiny</td><td>13.4</td><td>10.1</td><td>9.2</td><td>9.3</td><td>16.7</td><td>10.5</td><td>9.2 (-0.0)</td></tr></table>

Figure 7: Calibration plots of Mixup variants and AdAutoMix on CIFAR-100 using ResNet-18. The red line indicates the expected prediction tendency.

# A.6 THE ACCURACY OF VARIOUS MIXUP APPROACHES ON OCCLUSION IMAGE SET

Table 7: The accuracies of ResNet50 and Swin-Tiny Transformer trained by various Mixup approaches on CIFAR100 and CUB200 datasets with different occlusion ratios.

<table><tr><td rowspan="2">Method</td><td colspan="10">Swin-Tiny Transformer on CIFAR100</td></tr><tr><td>0%</td><td>10%</td><td>20%</td><td>30%</td><td>40%</td><td>50%</td><td>60%</td><td>70%</td><td>80%</td><td>90%</td></tr><tr><td>MixUp</td><td>76.82</td><td>74.54</td><td>71.88</td><td>67.98</td><td>63.18</td><td>55.26</td><td>44.20</td><td>30.07</td><td>15.69</td><td>6.14</td></tr><tr><td>PuzzleMix</td><td>80.45</td><td>78.98</td><td>77.52</td><td>75.47</td><td>71.16</td><td>64.42</td><td>53.40</td><td>38.53</td><td>21.39</td><td>7.91</td></tr><tr><td>AutoMix</td><td>82.68</td><td>81.40</td><td>79.05</td><td>75.44</td><td>70.61</td><td>64.30</td><td>55.25</td><td>40.92</td><td>23.09</td><td>9.73</td></tr><tr><td>AdAutoMix</td><td>84.33</td><td>82.41</td><td>80.16</td><td>76.84</td><td>72.09</td><td>66.74</td><td>58.09</td><td>46.48</td><td>28.02</td><td>9.91</td></tr><tr><td colspan="11">ResNet-50 on CUB200</td></tr><tr><td>Method</td><td>0%</td><td>10%</td><td>20%</td><td>30%</td><td>40%</td><td>50%</td><td>60%</td><td>70%</td><td>80%</td><td>90%</td></tr><tr><td>Vanilla</td><td>82.15</td><td>74.75</td><td>61.89</td><td>46.24</td><td>30.81</td><td>16.67</td><td>8.94</td><td>4.63</td><td>2.23</td><td>1.07</td></tr><tr><td>CutMix</td><td>83.05</td><td>76.45</td><td>64.44</td><td>50.86</td><td>39.47</td><td>28.99</td><td>20.78</td><td>14.46</td><td>8.64</td><td>2.21</td></tr><tr><td>PuzzleMix</td><td>84.01</td><td>80.99</td><td>76.01</td><td>68.45</td><td>58.15</td><td>43.44</td><td>28.41</td><td>15.38</td><td>5.76</td><td>2.39</td></tr><tr><td>AutoMix</td><td>84.10</td><td>81.90</td><td>78.05</td><td>73.18</td><td>64.96</td><td>51.21</td><td>36.85</td><td>22.35</td><td>8.63</td><td>3.88</td></tr><tr><td>AdAutoMix</td><td>84.57</td><td>82.46</td><td>80.16</td><td>75.84</td><td>66.19</td><td>55.74</td><td>40.19</td><td>25.44</td><td>10.04</td><td>4.39</td></tr></table>

Figure 8: The images with different occlusion ratios.

# A.7 THE CURVES OF EFFICIENCY AGAINST ACCURACY

The training time of various mixup data augmentation approaches is plotted against accuracy in Fig. 9. AdAutoMix takes more computation time, but it consistently outperforms previous state-of-the-art methods with different ResNet architectures on different datasets.

Figure 9: The plot of efficiency vs. accuracy.

# A.8 ADAUTOMIX MODULES EXPERIMENT

Table 8 lists the accuracy of our AdAutoMix when gradually adding regularization terms. The experimental results imply that each regularization term is capable of improving the robustness of AdAutoMix.

Table 9 shows the accuracy of our AdAutoMix with different $\lambda$. The experimental results show that AdAutoMix with $\lambda = 1$ as default achieves the best performance on the CIFAR100 dataset.

Table 10 shows the accuracy of our AdAutoMix with different numbers of input images $N$. From Table 10, we can see that AdAutoMix achieves the highest accuracy with $N = 3$ on CIFAR100.

Table 8: Loss function experiments on CIFAR100 based on ResNet18.

<table><tr><td>Method</td><td>Base</td><td>Base+0.5Lace</td><td>Base+0.5Lace + 0.5Lmce</td><td>Base+0.5Lace + 0.5Lmce -0.3Lamce</td><td>Base+0.5Lace + 0.5Lmce -0.3Lamce + 0.7Lcosine</td></tr><tr><td>ResNet18</td><td>79.38</td><td>79.98</td><td>80.04</td><td>81.32</td><td>81.55</td></tr></table>

Table 9: Classification accuracy of ResNet18 with different $\lambda$ ratios.

<table><tr><td rowspan="2">Method</td><td colspan="5">CIFAR100</td></tr><tr><td>0.2</td><td>1.0</td><td>2.0</td><td>5.0</td><td>10.0</td></tr><tr><td>ResNet18</td><td>82.27</td><td>82.32</td><td>81.73</td><td>80.02</td><td>81.05</td></tr><tr><td>ResNeXt50</td><td>84.22</td><td>84.40</td><td>83.99</td><td>84.31</td><td>83.63</td></tr></table>

Table 10: Classification accuracy of ResNet18 trained by AdAutoMix with different numbers of input images $N$, where $N = 1$ corresponds to the vanilla method.

<table><tr><td rowspan="2">inputs</td><td colspan="3">CIFAR100</td></tr><tr><td>Top1-Acc(%)</td><td>Top5-Acc(%)</td><td>Time (s/iter)</td></tr><tr><td>N = 1</td><td>78.04</td><td>94.60</td><td>0.1584</td></tr><tr><td>N = 2</td><td>82.16</td><td>95.88</td><td>0.1796</td></tr><tr><td>N = 3</td><td>82.32</td><td>95.92</td><td>0.2418</td></tr><tr><td>N = 4</td><td>81.78</td><td>95.68</td><td>0.2608</td></tr><tr><td>N = 5</td><td>80.79</td><td>95.80</td><td>0.2786</td></tr></table>

# A.9 ACCURACY OF RESNET-18 TRAINED BY ADAUTOMIX WITH AND WITHOUT ADVERSARIAL METHODS

Figure 10 shows the accuracy of ResNet-18 trained by our AdAutoMix with and without adversarial training on CIFAR100. The experimental results demonstrate that AdAutoMix with adversarial training achieves higher classification accuracy on the CIFAR100 dataset, which implies that the proposed adversarial framework is capable of generating harder samples to improve the robustness of the classifier.

Figure 10: The Top-1 accuracy plot of AdAutoMix training with and without adversarial methods (ResNet-18 on CIFAR100).

# A.10 COMPARISON WITH OTHER ADVERSARIAL DATA AUGMENTATION

We further compare Mixup (Zhang et al., 2017) and our AdAutoMix with existing adversarial data augmentation methods, e.g. DADA (Li et al., 2020), ME-ADA (Zhao et al., 2020), and SAMix (Zhang et al., 2023). Table 11 reports the classification accuracy of the various approaches. The experimental results in Table 11 demonstrate that our AdAutoMix outperforms existing adversarial data augmentation methods and achieves the highest accuracy on the CIFAR100 dataset.

Table 11: Experiments with AdAutoMix and other Adversarial Data Augmentation methods.

<table><tr><td></td><td>Baseline</td><td>MixUp</td><td>DADA</td><td>ME-ADA</td><td>SAMix</td><td>AdAutoMix</td></tr><tr><td>ResNet-18</td><td>76.42</td><td>78.52</td><td>78.86</td><td>77.45</td><td>54.01</td><td>81.55</td></tr></table>

# A.11 ALGORITHM OF ADAUTOMIX

Algorithm 1 AdAutoMix training process

Input: Encoders $E_{\phi}, E_{\widehat{\phi}}$, classifiers $\psi_W, \psi_{\widehat{W}}$, samples $\mathbb{S}$, mixing ratio $\lambda$, generator $G_{\theta}(\cdot)$, momentum coefficient $\xi$, and feature map $z_n^l$

1: $E_{\widehat{\phi}}$.params = $E_{\phi}$.params

2: for $\mathbb{X}, \mathbb{Y}$ in $\mathbb{S}$ loader do

3: $z_n^l = E_{\widehat{\phi}}(\mathbb{X})$

4: $x_{mix} = G_{\theta}(z_n^l, \lambda)$

5: $L_{mce}, L_{ace} = \psi_W(x_{mix}, \lambda, \mathbb{Y})$

6: $L_{amce}, L_{cosine} = \psi_{\widehat{W}}(x_{mix}, \lambda, \mathbb{Y})$

7: for $1 < t_1 < T_1$ do

8: Update $W(t + 1)$ according to Eq. 14

9: end for

10: for $1 < t_2 < T_2$ do

11: Update $\theta(t + 1)$ according to Eq. 16

12: end for

13: Update ($E_{\widehat{\phi}}$.params, $E_{\phi}$.params):

14: $E_{\widehat{\phi}}$.params = $\xi * E_{\widehat{\phi}}$.params + $(1 - \xi) * E_{\phi}$.params

15: end for
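
To make the control flow of Algorithm 1 concrete, below is a hedged PyTorch-style sketch of one training iteration; every module, optimizer, and loss callable is a placeholder standing in for the paper's components, and the loss signatures are assumptions rather than the authors' code.

```python
import torch

def adautomix_step(x, y, lam, encoder, ema_encoder, classifier, ema_classifier,
                   generator, opt_w, opt_theta, losses, T1=1, T2=1, xi=0.999):
    """One AdAutoMix iteration following Algorithm 1 (names are placeholders).

    `losses` is a dict of callables for L_mce, L_ace, L_amce and L_cosine.
    """
    with torch.no_grad():
        z = ema_encoder(x)                         # line 3: EMA features
    x_mix = generator(z, lam).detach()             # line 4: mixed samples

    for _ in range(T1):                            # lines 7-9: classifier update
        loss = (losses["mce"](classifier(x_mix), y, lam)
                + losses["ace"](classifier(x), y))
        opt_w.zero_grad(); loss.backward(); opt_w.step()

    for _ in range(T2):                            # lines 10-12: adversarial
        x_adv = generator(ema_encoder(x), lam)     # generator maximizes the
        loss = (-losses["mce"](classifier(x_adv), y, lam)   # classifier loss,
                + losses["amce"](ema_classifier(x_adv), y, lam)
                + losses["cosine"](x_adv, x))      # under EMA/cosine constraints
        opt_theta.zero_grad(); loss.backward(); opt_theta.step()

    with torch.no_grad():                          # lines 13-14: EMA update
        for p_ema, p in zip(ema_encoder.parameters(), encoder.parameters()):
            p_ema.mul_(xi).add_((1.0 - xi) * p)
```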

# B VISUALIZATION OF MIXED SAMPLES

# B.1 CLASS ACTIVATION MAPPING (CAM) OF DIFFERENT MIXUP SAMPLES

The class activation maps (CAM) of various mixup models are shown in Fig. 11.

Figure 11: The class activation map of various Mixup models $(\lambda = 0.5)$.

# B.2 MIXED SAMPLES ON CUB-200

The mixed samples generated by our approach trained on the CUB200 dataset are depicted in Fig. 12.

Figure 12: Visualization of mixed samples on CUB-200.

# B.3 MIXED SAMPLES ON CIFAR100

The mixed samples generated by our approach trained on the CIFAR100 dataset are shown in Fig. 13.

Figure 13: Visualization of mixed samples on CIFAR100.

# B.4 DIVERSITY OF SAMPLES GENERATED BY VARIOUS APPROACHES

To demonstrate that AdAutoMix is capable of generating diverse samples, we show images synthesized by AdAutoMix and AutoMix on ImageNet-1K. From Fig. 14, we can see that AdAutoMix produces mixed samples with greater differences across epochs. By contrast, AutoMix generates similar images at different training epochs, which implies that the proposed AdAutoMix has the capacity to produce diverse images through adversarial training.

Figure 14: Mixed samples at different training epochs.

adversarialautomixup/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c78c732498682c72dfd82118935585a85da7344446d451c1a4ec0f6084f8f12b
size 1141763

adversarialautomixup/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb85299c754f0c95ba26c38b4df4928e7eca42a40dbd08199b7d243cb3fa092b
size 674088

ageneralframeworkforuserguidedbayesianoptimization/168ed08a-b567-41d2-abd2-5e82251d79b2_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:32cd87fa0ee8362de8856022c37e47fb69f87ed869525099ef6e7bcbc8d4fd25
size 107998

ageneralframeworkforuserguidedbayesianoptimization/168ed08a-b567-41d2-abd2-5e82251d79b2_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a8b4b1efe2ff9127c7909219492e93fc8119790417b1caee36efbd018a734c78
size 133023

ageneralframeworkforuserguidedbayesianoptimization/168ed08a-b567-41d2-abd2-5e82251d79b2_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:79d6c9ba2cf8ef4738bf7716c3089e39213f7b6ad638796f283bacc7d597c6fd
size 1338684

ageneralframeworkforuserguidedbayesianoptimization/full.md
ADDED
@@ -0,0 +1,413 @@

# A GENERAL FRAMEWORK FOR USER-GUIDED BAYESIAN OPTIMIZATION

Carl Hvarfner
Lund University
carl.hvarfner@cs.lth.se

Frank Hutter
University of Freiburg
fh@cs.uni-freiburg.de

Luigi Nardi
DBtune, Lund University, Stanford University
luigi.nardi@cs.lth.se

# ABSTRACT

The optimization of expensive-to-evaluate black-box functions is prevalent in various scientific disciplines. Bayesian optimization is an automatic, general and sample-efficient method to solve these problems with minimal knowledge of the underlying function dynamics. However, the ability of Bayesian optimization to incorporate prior knowledge or beliefs about the function at hand in order to accelerate the optimization is limited, which reduces its appeal for knowledgeable practitioners with tight budgets. To allow domain experts to customize the optimization routine, we propose ColaBO, the first Bayesian-principled framework for incorporating prior beliefs beyond the typical kernel structure, such as the likely location of the optimizer or the optimal value. The generality of ColaBO makes it applicable across different Monte Carlo acquisition functions and types of user beliefs. We empirically demonstrate ColaBO's ability to substantially accelerate optimization when the prior information is accurate, and to retain approximately default performance when it is misleading.

# 1 INTRODUCTION

Bayesian Optimization (BO) (Mockus et al., 1978; Jones et al., 1998; Snoek et al., 2012) is a well-established methodology for the optimization of expensive-to-evaluate black-box functions. Known for its sample efficiency, BO has been successfully applied to a variety of domains where laborious system tuning is prominent, such as hyperparameter optimization (Snoek et al., 2012; Bergstra et al., 2011b; Lindauer et al., 2022), neural architecture search (Ru et al., 2021; White et al., 2021), robotics (Calandra et al., 2014; Mayr et al., 2022), hardware design (Nardi et al., 2019; Ejjeh et al., 2022), and chemistry (Griffiths & Hernández-Lobato, 2020).

Typically employing a Gaussian Process (Rasmussen & Williams, 2006) (GP) surrogate model, BO allows the user to specify a prior over functions $p(f)$ via the Gaussian Process kernel, as well as an optional prior over its hyperparameters. Within the framework of the prior, the user can specify expected smoothness, output range and possible noise level of the function at hand, with the hopes of accelerating the optimization if accurate. However, the prior beliefs that can be specified within the framework of the kernel hyperparameters do not span the full range of beliefs that practitioners may possess. For example, practitioners may know which parts of the input space tend to work best (Oh et al., 2018; Perrone et al., 2019; Smith, 2018; Wang et al., 2019), know a range or upper bound on the optimal output (Jeong & Kim, 2021; Nguyen & Osborne, 2020) such as a maximal achievable accuracy of $100\%$, or other properties of the objective, such as preference relations between configurations (Huang et al., 2022). The limited ability of practitioners to interact and collaborate with the BO machinery (Kumar et al., 2022) thus runs the risk of failing to use valuable domain expertise, or alienating knowledgeable practitioners altogether. While knowledge injection beyond what is natively supported by the GP kernel is crucial to further increase the efficiency of

Figure 1: Three different ColaBO priors: (left) Prior over the optimum $x_{*}$, and the induced change in the GP for an optimum located in the green region. (middle) Prior over the optimal value, $f^{*} < 0.8$. (right) Prior over preference relations $f(\pmb{x}_1) \geqslant f(\pmb{x}_2)$ for five preferences (green arrows, e.g. $f(0.0) \geqslant f(0.1) \geqslant f(0.2)$).

Bayesian optimization, no approach to date allows for the integration of arbitrary types of user knowledge. To address this gap, we propose an intuitive framework that effectively allows the user to reshape the Gaussian process at will to mimic their held beliefs.

Figure 1 illustrates how, for the three aforementioned priors, the GP is reshaped to faithfully represent the belief held by the user - whether it be a prior over the optimum (left), optimal value (middle), or preference relations between points (right). Our novel framework for Collaborative Bayesian Optimization (ColaBO) diverges from the typical assumption of Gaussian posteriors, and is applicable to any Monte Carlo acquisition function (Wilson et al., 2017; 2018; Balandat et al., 2020). Formally, we make the following contributions:

1. We introduce ColaBO, a framework for incorporating user knowledge over the optimizer, optimal value and preference relations into Bayesian optimization in the form of an additional prior on the surrogate, orthogonal to the conventional prior on the kernel hyperparameters,
2. We demonstrate that the proposed framework is generally applicable to Monte Carlo acquisition functions, inheriting MC acquisition function utility,
3. We empirically show that ColaBO accelerates optimization when injected with priors over the optimal location and optimal value.

# 2 BACKGROUND

We outline Bayesian optimization, Gaussian Processes and Monte Carlo (MC) acquisition functions, as well as the concept of a prior over the optimum.

# 2.1 BAYESIAN OPTIMIZATION

We consider the problem of optimizing a black-box function $f$ across a set of feasible inputs $\mathcal{X} \subset \mathbb{R}^d$:

$$
\boldsymbol{x}^{*} \in \underset{\boldsymbol{x} \in \mathcal{X}}{\arg\max}\, f(\boldsymbol{x}). \tag{1}
$$

We assume that $f(\pmb{x})$ is expensive to evaluate and can potentially only be observed through a noise-corrupted estimate, $y_{\pmb{x}}$, where $y_{\pmb{x}} = f(\pmb{x}) + \epsilon, \epsilon \sim \mathcal{N}(0, \sigma_{\epsilon}^{2})$ for some noise level $\sigma_{\epsilon}^{2}$. In this setting, we wish to maximize $f$ in an efficient manner. Bayesian optimization (BO) aims to globally maximize $f$ by an initial design and thereafter sequentially choosing new points $x_{n}$ for some iteration $n$, creating the data $\mathcal{D}_n = \mathcal{D}_{n-1} \cup \{(x_n, y_n)\}$ (Brochu et al., 2010; Shahriari et al., 2016; Garnett, 2022). After each new observation, BO constructs a probabilistic surrogate model $p(f|\mathcal{D}_n)$ (Snoek et al., 2012; Hutter et al., 2011; Bergstra et al., 2011a; Müller et al., 2023) and uses that surrogate to build an acquisition function $\alpha(x; \mathcal{D}_n)$ which selects the next query.
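
Schematically, the loop just described can be sketched as follows; `fit_surrogate` and `acquire` are placeholders for any surrogate model and acquisition function, and the random candidate search is a simplification for illustration only.

```python
import numpy as np

def bayesian_optimization(f, acquire, fit_surrogate, bounds, n_init=5, budget=30):
    """Generic BO loop: initial design, then fit-surrogate / maximize-acquisition."""
    dim = len(bounds)
    lo, hi = np.array(bounds, dtype=float).T
    X = lo + (hi - lo) * np.random.rand(n_init, dim)   # initial design
    y = np.array([f(x) for x in X])

    for _ in range(budget):
        model = fit_surrogate(X, y)                    # surrogate p(f | D_n)
        # Maximize the acquisition over random candidates (crude but simple).
        cand = lo + (hi - lo) * np.random.rand(1024, dim)
        x_next = cand[np.argmax(acquire(model, cand))]
        X = np.vstack([X, x_next])                     # D_n = D_{n-1} U {(x_n, y_n)}
        y = np.append(y, f(x_next))
    return X[np.argmax(y)], y.max()
```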

# 2.2 GAUSSIAN PROCESSES

When constructing the surrogate, the most common choice is a Gaussian process (GP) (Rasmussen & Williams, 2006). The GP utilizes a covariance function $k$, which encodes a prior belief about the smoothness of $f$ and determines how previous observations influence prediction. Given observations $\mathcal{D}_n$ at iteration $n$, the Gaussian posterior $p(f|\mathcal{D}_n)$ over the objective is characterized by the posterior mean $\mu_n(\boldsymbol{x})$ and (co-)variance $\Sigma_n(\boldsymbol{x},\boldsymbol{x}')$ of the GP:

$$
\mu_n(\boldsymbol{x}) = \mathbf{k}_n(\boldsymbol{x})^{\top}\left(\mathbf{K}_n + \sigma_{\epsilon}^{2}\mathbf{I}\right)^{-1}\mathbf{y}, \quad \Sigma_n(\boldsymbol{x}, \boldsymbol{x}') = k(\boldsymbol{x}, \boldsymbol{x}') - \mathbf{k}_n(\boldsymbol{x})^{\top}\left(\mathbf{K}_n + \sigma_{\epsilon}^{2}\mathbf{I}\right)^{-1}\mathbf{k}_n(\boldsymbol{x}'),
$$

where $(\mathbf{K}_n)_{ij} = k(\pmb{x}_i,\pmb{x}_j)$, $\mathbf{k}_n(\pmb{x}) = [k(\pmb{x},\pmb{x}_1),\dots,k(\pmb{x},\pmb{x}_n)]^\top$ and $\sigma_{\epsilon}^{2}$ is the noise variance. For applications in BO and beyond, samples from the posterior are required either directly for optimization (Eriksson et al., 2019) through Thompson sampling (Thompson, 1933), or to estimate auxiliary quantities of interest (Hernandez-Lobato et al., 2015; Neiswanger et al., 2021; Hvarfner et al., 2023). For a finite set of $k$ query locations $\boldsymbol{X} = \{\boldsymbol{x}_1,\ldots,\boldsymbol{x}_k\}$, the classical method of generating samples is via a location-scale transform of Gaussian random variables, $f(\pmb{X}) = \mu_n(\pmb{X}) + \pmb{L}\pmb{\epsilon}$, where $\pmb{L}$ is the Cholesky factor of the posterior covariance $\Sigma_n(\pmb{X}, \pmb{X})$ and $\epsilon \sim \mathcal{N}(0,\pmb{I})$. Unfortunately, this classic approach is intrinsically non-scalable, incurring an $\mathcal{O}(k^3)$ cost due to the aforementioned matrix decomposition.
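
A minimal NumPy sketch of this location-scale posterior sampling, using an assumed RBF kernel, lengthscale, and jitter values; note the explicit Cholesky factorization that incurs the cubic cost.

```python
import numpy as np

def rbf(A, B, ls=0.2):
    """Squared-exponential kernel; the lengthscale is an arbitrary choice."""
    d2 = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return np.exp(-0.5 * d2 / ls**2)

def posterior_samples(X_train, y_train, X_query, n_samples=8, noise=1e-4):
    """Draw joint posterior samples at X_query via a location-scale transform."""
    K = rbf(X_train, X_train) + noise * np.eye(len(X_train))
    k_q = rbf(X_query, X_train)
    K_inv = np.linalg.inv(K)
    mu = k_q @ K_inv @ y_train
    cov = rbf(X_query, X_query) - k_q @ K_inv @ k_q.T
    # The O(k^3) decomposition of the posterior covariance.
    L = np.linalg.cholesky(cov + 1e-8 * np.eye(len(X_query)))
    eps = np.random.randn(len(X_query), n_samples)
    return mu[:, None] + L @ eps
```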

# 2.3 DECOUPLED POSTERIOR SAMPLING

To remedy the issue of scalability in posterior sampling, $\mathcal{O}(k)$ weight-space approximations based on Random Fourier Features (RFF) (Rahimi & Recht, 2007) obtain approximate (continuous) function draws $\tilde{f}(\boldsymbol{x}) = \sum_{i=1}^{m} w_i \phi_i(\boldsymbol{x})$, with features $\phi_i(\boldsymbol{x}) = \sqrt{2/m}\,\cos(\psi_i^\top \boldsymbol{x} + b_i)$. The random variables $w_i \sim \mathcal{N}(0,1)$, $b_i \sim \mathcal{U}(0,2\pi)$, and $\psi_i$ are sampled proportionally to the spectral density of $k$.

While achieving scalability, the seminal RFF approach by Rahimi & Recht (2007) suffers from the issue of variance starvation (Mutny & Krause, 2018; Wang et al., 2018; Wilson et al., 2020). As a remedy, Wilson et al. (2020) decouple the draw of functions from the approximate posterior $p(\tilde{f}|\mathcal{D})$ into a more accurate draw from the prior $p(\tilde{f})$, followed by a deterministic data-dependent update:

$$
(\tilde{f}|\mathcal{D})(\boldsymbol{x}) \stackrel{d}{=} \underbrace{\tilde{f}(\boldsymbol{x})}_{\text{draw from prior}} + \underbrace{\mathbf{k}_n(\boldsymbol{x})^{\top}\left(\mathbf{K}_n + \sigma_{\epsilon}^{2}\mathbf{I}\right)^{-1}\left(\mathbf{y} - \tilde{f}(\boldsymbol{X}_n) - \boldsymbol{\epsilon}\right)}_{\text{deterministic update}} \tag{2}
$$

Eq. 2 deviates from the distribution-first approach that is typically prevalent in GPs in favor of a variable-first approach utilizing Matheron's rule (Journel & Huijbregts, 1976).
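
A compact NumPy sketch of one decoupled draw under these definitions: an RFF draw from the prior followed by the deterministic Matheron-style update of Eq. 2. The RBF spectral sampling, feature count, and jitter are illustrative assumptions.

```python
import numpy as np

def rff_prior_draw(dim, m=1024, ls=0.2):
    """One approximate prior draw from an RBF GP via random Fourier features."""
    psi = np.random.randn(m, dim) / ls       # samples of the RBF spectral density
    b = np.random.uniform(0, 2 * np.pi, m)
    w = np.random.randn(m)                   # weights w_i ~ N(0, 1)
    return lambda X: np.sqrt(2.0 / m) * np.cos(X @ psi.T + b) @ w

def pathwise_posterior_draw(X_train, y_train, kernel, noise=1e-4):
    """Matheron-style update: prior draw plus a deterministic correction."""
    f_prior = rff_prior_draw(X_train.shape[1])
    K = kernel(X_train, X_train) + noise * np.eye(len(X_train))
    eps = np.sqrt(noise) * np.random.randn(len(X_train))
    alpha = np.linalg.solve(K, y_train - f_prior(X_train) - eps)
    # Return a continuous function handle, evaluable anywhere in X-space.
    return lambda X: f_prior(X) + kernel(X, X_train) @ alpha
```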

# 2.4 MONTE CARLO ACQUISITION FUNCTIONS

Acquisition functions act on the surrogate model to quantify the utility of a point in the search space. They encode a trade-off between exploration and exploitation, typically using a greedy heuristic to do so. A simple and computationally cheap heuristic is Expected Improvement (EI) (Jones et al., 1998; Bull, 2011). For a noiseless function and a current best observation $y_{n}^{*}$, the EI acquisition function is $\alpha_{EI}(\pmb{x}) = \mathbb{E}_{y_{\pmb{x}}}[(y_n^* - y_{\pmb{x}})^+]$. For noisy problem settings, a noise-adapted variant of EI (Letham et al., 2018) is frequently considered, where both the incumbent $y_{n}^{*}$ and the upcoming query $y_{\pmb{x}}$ are substituted by the non-observable noiseless incumbent $f_{n}^{*}$ and the noiseless upcoming query $f_{\pmb{x}}$. Other frequently used acquisition functions are the Upper Confidence Bound (UCB) (Srinivas et al., 2012), Probability of Improvement (PI) (Kushner, 1964) and Knowledge Gradient (KG) (Frazier et al., 2009). Information-theoretic acquisition functions consider the mutual information to select the next query, $\alpha_{\mathrm{MI}}(\pmb{x}) = I((\pmb{x}, y_{\pmb{x}}); *|\mathcal{D}_n)$, where $*$ can denote either the optimum $x_{*}$ as in (Predictive) Entropy Search (ES/PES) (Hennig & Schuler, 2012; Hernández-Lobato et al., 2014), the optimal value $f_{*}$ as in Max-value Entropy Search (MES) (Wang & Jegelka, 2017; Moss et al., 2021), or the tuple $(x_{*}, f_{*})$ as in Joint Entropy Search (JES) (Hvarfner et al., 2022a; Tu et al., 2022).

All the aforementioned acquisition functions compute expectations $\mathbb{E}_{f_x}$ (or alternatively $\mathbb{E}_{y_x}$) over some utility $u(f_x)$ of the output (Wilson et al., 2017; 2018), which typically have simple, or even closed-form, solutions for Gaussian posteriors. However, approximating the expectation through Monte Carlo integration has proven useful in the context of batch optimization (Wilson et al., 2018), efficient acquisition function approximation (Balandat et al., 2020), and non-Gaussian posteriors (Astudillo & Frazier, 2021). By sampling over possible outputs $f_x$ and utilizing the reparametrization

trick (Kingma & Welling, 2014; Rezende et al., 2014), utilities $u$ can be easily computed across a larger set of applications and be optimized to greater accuracy.

# 2.5 PRIOR OVER THE OPTIMUM

A prior over the optimum (Souza et al., 2021; Hvarfner et al., 2022b; Mallik et al., 2023) is a user-specified belief $\pi : \mathcal{X} \to \mathbb{R}$ of the subjective likelihood that a given $\pmb{x}$ is optimal. Formally,

$$
\pi(\boldsymbol{x}) = \mathbb{P}\left(\boldsymbol{x} = \underset{\boldsymbol{x}'}{\arg\max} f(\boldsymbol{x}')\right). \tag{3}
$$

This prior is generally considered to be independent of observed data; rather, it is the result of previous experimentation or anecdotal evidence. Regions that the user expects to contain the optimum will typically have a high value, but this does not exclude the possibility of the user belief $\pi(\pmb{x})$ being inaccurate, or even misleading. Lastly, we require $\pi$ to be strictly positive on all of $\mathcal{X}$, which reflects that any point included in the search space may be optimal.

# 3 METHODOLOGY

We now introduce ColaBO, the first Bayesian-principled BO framework that flexibly allows users to collaborate with the optimizer by injecting prior knowledge about the objective that substantially exceeds the type of prior knowledge natively supported by GPs. In Sec. 3.1, we introduce and derive a novel prior over function properties, which yields a surrogate model conditioned on the user belief. Thereafter, in Sec. 3.2, we demonstrate how the hierarchical prior integrates with MC acquisition functions. Lastly, in Sec. 3.3, we state practical considerations to assure the performance of ColaBO.

# 3.1 PRIOR OVER FUNCTION PROPERTIES

We consider the typical GP prior over functions $p(f) = \mathcal{GP}(\mu, \Sigma)$, where the characteristics of $f$, such as smoothness and output magnitude, are fully defined by the kernel $k$ (and its associated hyperparameters $\theta$, which are omitted for brevity). We seek to inject an additional, user-defined prior belief over $f$ into the GP, such as the prior over the optimum in Sec. 2.5, $\pi(\boldsymbol{x}) = \mathbb{P}\left(\boldsymbol{x} = \arg \max_{\boldsymbol{x}'} f(\boldsymbol{x}')\right)$. By postulating that $\pi$ is accurate, we wish to form a belief-weighted prior - a prior over functions where the distribution over the optimum is exactly $\pi(\boldsymbol{x})$. We start by considering the user belief $\pi: \mathcal{X} \to \mathbb{R}$ from Eq. (3), and extend the definition to involve the integration over $f$, similarly to the Thompson sampling definition of Kandasamy et al. (2018). Formally,

$$
\pi(\boldsymbol{x}) = \mathbb{P}\left(\boldsymbol{x} = \underset{\boldsymbol{x}'}{\arg\max} f(\boldsymbol{x}')\right) = \int_{f} \pi\left(\delta_{*}(\boldsymbol{x}|f)\right) p(f)\, df \tag{4}
$$

where $\delta_{*}(\pmb{x}|f) = 1$, if $\pmb{x} = \arg \max_{\pmb{x}' \in \mathcal{X}} f(\pmb{x}')$, and zero otherwise. As such, $\delta_{*}(\pmb{x}|f)$ maps a function $f_{i} \sim p(f)$ to its arg max, and evaluates whether this arg max is equal to $\pmb{x}$.

However, a belief over the optimum, or any other property, of a function $f$ is implicitly a belief over the function $f$ itself. As such, a non-uniform $\pi(\pmb{x})$ should reasonably induce a change in the prior $p(f)$ to reflect the non-uniform optimum. To this end, we introduce an augmented user belief over the optimum $\rho_{\pmb{x}}^{*} \sim \mathcal{P}_{\pmb{x}}^{*}$, where $\mathcal{P}_{\pmb{x}}^{*}$ is the prior over possible user beliefs, and draws are random functions $\rho_{\pmb{x}}^{*}: \mathcal{X} \to \mathbb{R}^{+}$ which themselves take a function $f$ as input, and output a positive real number quantifying the likelihood of a sample $f_{i}$ under $\pi(\pmb{x})$. Formally, we define $\rho_{\pmb{x}}^{*}$ as

$$
\rho_{\boldsymbol{x}}^{*}(f) = \mathbb{P}\left(\boldsymbol{x} = \underset{\boldsymbol{x}'}{\arg\max} f(\boldsymbol{x}')\right) = \frac{1}{Z_{\rho_{\boldsymbol{x}}^{*}}} \int_{\mathcal{X}} \delta_{*}(\boldsymbol{x}|f)\, \pi(\boldsymbol{x})\, d\boldsymbol{x} \tag{5}
$$

where the intractable normalizing constant $Z_{\rho_x^*}$ arises from the fact that the integrated density $\pi(\pmb{x})$ acts on a finite-dimensional property of $f$, and not on $f$ itself. Under $\rho_x^*(f)$, functions whose arg max lies in a high-density region under $\pi$ are assigned a higher probability. Notably, the definition in Eq. 5 extends to other properties of $f$ as well: a user belief $p_{f_*}$ over the optimal value $f_*$ analogously yields a belief over functions $\rho_{f_*}^*(f)$:

$$
\rho_{f_*}^{*}(f) = \mathbb{P}\left(y = \max_{\boldsymbol{x}'} f(\boldsymbol{x}')\right) = \frac{1}{Z_{\rho_{f_*}^{*}}} \int_{y} \delta_{*}(y|f)\, p_{f_*}(y)\, dy. \tag{6}
$$

It is worthwhile to reflect on the meaning of $\rho(f)$, and how beliefs over function properties propagate to $p(f)$. Concretely, if the user belief $\rho_{f_*}^*(f)$ asserts that the maximal value lies within $C_1 < \max f < C_2$, the resulting distribution over $f$ should only contain functions whose maximum falls within this range. Using rejection sampling, functions which violate this criterion are filtered out, which yields the posterior $p(f|\rho)$. Having defined and exemplified how user beliefs impact the prior over functions $p(f)$, the role of $\rho$ as a likelihood should be apparent: given a prior over functions $p(f)$ and a user belief over functions $\rho(f)$ which places a probability on all possible draws $f_i \sim p(f)$, we can form a belief-weighted prior $p(f|\rho) \propto p(f)\rho(f)$. Thus, we introduce the formal definition of a user belief over a function property:
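
As a concrete illustration of this filtering for a prior over the optimum, the following NumPy sketch resamples approximate prior draws in proportion to the belief density at their maximizers; all names are hypothetical, and the grid-based argmax is a simplification of the continuous case.

```python
import numpy as np

def belief_weighted_draws(prior_draws, X_grid, pi, n_keep):
    """Resample prior draws so their argmax distribution follows pi.

    prior_draws: list of callables, approximate draws f_i ~ p(f).
    pi:          callable, user belief density over the optimum (up to a constant).
    """
    # Weight each draw by the belief density at its maximizer ...
    weights = np.array([pi(X_grid[np.argmax(f(X_grid))]) for f in prior_draws])
    probs = weights / weights.sum()
    # ... and resample proportionally (self-normalized importance resampling),
    # leaving functions that are highly probable under the belief.
    idx = np.random.choice(len(prior_draws), size=n_keep, p=probs)
    return [prior_draws[i] for i in idx]
```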

Definition 3.1 (User Belief over Functions). The user belief over functions $\rho(f) \propto \frac{p(f|\rho)}{p(f)}$.

As the subsequent derived methodology applies independently of the specific property of $f$ that a prior is placed on, we will henceforth consider a belief over a general function property $\rho$. Having defined the role of $\rho$ and the posterior over functions it produces, a natural question arises: How is $p(f|\rho)$ updated once observations $\mathcal{D}$ are obtained?

Since the data $\mathcal{D}$ is independent of the prior (the data generation process is intrinsically unaffected by the belief held by the user), application of Bayes' rule yields the following posterior $p(f|\mathcal{D},\rho)$:

$$
p(f|\mathcal{D}, \rho) = \frac{p(\mathcal{D}, \rho|f)\, p(f)}{p(\mathcal{D}, \rho)} = \frac{p(\mathcal{D}|f)\, p(\rho|f)\, p(f)}{p(\mathcal{D})\, p(\rho)} = \frac{p(f|\rho)}{p(f)}\, p(f|\mathcal{D}) \propto \rho(f)\, p(f|\mathcal{D}), \tag{7}
$$

where the right side of the proportionality in Eq. 7 suggests an intuitive generation process for samples $(f|\mathcal{D},\rho)$ to approximate the density $p(f|\mathcal{D},\rho)$. Utilizing the pathwise update from Eq. 2, we note that given an approximate draw $\tilde{f}$ from the prior, the subsequent data-dependent update is deterministic. Recalling Eq. 2 and assuming independence between $\rho$ and $\mathcal{D}$, $\rho$ only affects the draw from the prior, whereas $\mathcal{D}$ only affects the update. Consequently, we obtain

$$
(\tilde{f}|\mathcal{D}, \rho)(\boldsymbol{x}) \stackrel{d}{=} \underbrace{(\tilde{f}|\rho)(\boldsymbol{x})}_{\text{draw from prior}} + \underbrace{\mathbf{k}_n(\boldsymbol{x})^{\top}\left(\mathbf{K}_n + \sigma_{\epsilon}^{2}\mathbf{I}\right)^{-1}\left(\mathbf{y} - (\tilde{f}|\rho)(\boldsymbol{X}_n) - \boldsymbol{\epsilon}\right)}_{\text{deterministic update}}, \tag{8}
$$
|
| 149 |
+
|
| 150 |
+
where $(\tilde{f}|\rho) \sim p(\tilde{f})\rho(\tilde{f})$ are once again obtained using rejection sampling on draws from $p(\tilde{f})$. Figure 2 displays this in detail: given the typical GP prior over functions and a user belief over the optimum, we obtain a distribution over functions $p(\tilde{f}|\rho_{\mathbf{x}}^{*})$ before having observed any data (top right). Samples from the approximate prior $p(\tilde{f})$ (light blue) are re-sampled proportionally to their probability under the belief $\rho_{\mathbf{x}}^{*}(\tilde{f})$ (green), leaving samples $(\tilde{f}|\rho_{\mathbf{x}}^{*})$ in navy blue, which are highly probable under $\rho_{\mathbf{x}}^{*}$. Once data is obtained, these samples are updated according to Eq. 8, which preserves the shape of the samples far away from observed data and yields the desired posterior.
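A minimal numpy sketch of the deterministic update in Eq. 8, continuing the grid-based setup from the earlier snippet (the kernel helper, data, and noise level are assumptions for illustration; each retained prior draw is conditioned on the observations without further sampling):

```python
import numpy as np

def rbf(A, B, ls=0.1):
    return np.exp(-0.5 * (A - B.T) ** 2 / ls**2)

rng = np.random.default_rng(1)
X = np.linspace(0.0, 1.0, 200)[:, None]            # grid on which paths are stored
Xn = np.array([[0.2], [0.5], [0.9]])               # observed inputs
y = np.array([0.3, -0.4, 0.1])                     # noisy observations
sigma2 = 1e-2                                      # observation noise variance

A_inv = np.linalg.inv(rbf(Xn, Xn) + sigma2 * np.eye(len(Xn)))  # (K_n + sigma^2 I)^{-1}
kxn = rbf(X, Xn)                                               # k_n(x) for all grid x

def pathwise_update(f_grid, f_at_Xn):
    """Eq. 8: f_grid is a prior draw on the grid, f_at_Xn its values at Xn
    (obtained by drawing the path jointly on the grid and the observed inputs)."""
    eps = rng.normal(0.0, np.sqrt(sigma2), size=len(Xn))       # simulated noise
    return f_grid + kxn @ (A_inv @ (y - f_at_Xn - eps))
```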
# 3.2 PRIOR-WEIGHTED MONTE CARLO ACQUISITION FUNCTIONS
Naturally, neither the belief-weighted prior $p(f|\rho)$ nor the belief-weighted posterior $p(f|\mathcal{D},\rho)$ has a closed-form expression. Both are inherently non-Gaussian for non-uniform beliefs. As such, we resort to MC acquisition functions to compute utilities that are amenable to BO. In the subsequent section, we focus on the prevalent acquisition functions EI and MES.
Expected Improvement The computation of the MC-EI within the ColaBO framework requires only minor adaptations of the original MC acquisition function. By definition, MC-EI assigns utility $u$ as $u_{\mathrm{EI}}(f(\pmb{x})) = \max(f_n^* - f(\pmb{x}), 0)$ , which yields
$$
\alpha_{\mathrm{EI}}(\boldsymbol{x}; \mathcal{D}) = \mathbb{E}_{f_{\boldsymbol{x}} | \mathcal{D}}\left[u_{\mathrm{EI}}\left(f_{\boldsymbol{x}}\right)\right] \approx \tag{9}
$$

$$
\sum_{\ell} \max\left(f_n^* - f_{\boldsymbol{x}}^{(\ell)},\, 0\right), \quad f_{\boldsymbol{x}}^{(\ell)} \sim p(f(\boldsymbol{x}) \,|\, \mathcal{D}). \tag{10}
$$


Figure 3: (Top) Draws from $p(f|\mathcal{D})$ (light blue) and $p(f|\rho, \mathcal{D})$ with a prior $\rho$ located in the green region. (Bottom) Vanilla MC-EI and ColaBO MC-EI, resulting from computing the acquisition function from sample draws from $p(f|\rho, \mathcal{D})$ .

Figure 2: (Top left) Draws from the prior $p(f)$ (light blue) and the belief-weighted prior $p(f|\rho)$, whose members are likely to have their optimum within the green region. (Top right) Pathwise updated draws based on observed data. As the green region is distant from the observed data, samples are almost unaffected by the data in this region. (Bottom left) Exact mean and standard deviation $(\mu_{\pmb{x}},\sigma_{\pmb{x}})$ of $p(f)$ and estimated mean and standard deviation of $p(f|\rho)$. (Bottom right) Exact $p(f|\mathcal{D})$ and estimated $p(f|\rho,\mathcal{D})$. As $p(f|\rho)$ consists of functions whose optimum is located within the green region, the resulting model has a higher mean and lower variance within this region. Moreover, $p(f|\rho)$ globally displays lower upside variance compared to the vanilla GP.
Utilizing rejection sampling, we can compute the MC-EI under the ColaBO posterior accordingly,
$$
\alpha_{\mathrm{EI}}(\boldsymbol{x}; \mathcal{D}, \rho) = \mathbb{E}_{f_{\boldsymbol{x}} | \mathcal{D}, \rho}\left[u_{\mathrm{EI}}(f_{\boldsymbol{x}})\right] \propto \tag{11}
$$

$$
\int_f u_{\mathrm{EI}}\left(f_{\boldsymbol{x}}\right) \rho(f)\, p(f \,|\, \mathcal{D})\, df \approx \sum_{\ell} \rho\left(f^{(\ell)}\right) \max\left(f_n^* - f_{\boldsymbol{x}}^{(\ell)},\, 0\right), \quad f_{\boldsymbol{x}}^{(\ell)} \sim p(f(\boldsymbol{x}) \,|\, \mathcal{D}), \tag{12}
$$
wherein the samples in Eq. 12 are drawn from the prior, retained with probability $\rho(f^{(\ell)}) / \max \rho$, and pathwise updated. In Figure 3, we demonstrate how ColaBO-EI differs from MC-EI for the same posterior as in Figure 2. By computing $\alpha_{\mathrm{EI}}$ from samples biased by $\rho$, ColaBO substantially directs the search towards good regions under $\rho$. Derivations for PI and KG are analogous to that of EI.
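A minimal sketch of the belief-weighted MC-EI in Eq. 12 (our illustration under the same grid-based setup as above): each pathwise-updated sample contributes to the EI sum with weight $\rho(f^{(\ell)})$, and the normalizing constant can be dropped since it does not affect the arg max.

```python
import numpy as np

def colabo_ei(F_post, rho_weights, f_best):
    """F_post: (L, n_grid) pathwise-updated samples; rho_weights: (L,) values of
    rho(f^(l)); f_best: incumbent value f_n^*. Returns the weighted EI over the
    grid, up to a constant factor (Eq. 12 uses the minimization convention)."""
    improvement = np.maximum(f_best - F_post, 0.0)
    return (rho_weights[:, None] * improvement).sum(axis=0)

# Example usage with hypothetical inputs:
# x_next = grid[np.argmax(colabo_ei(F_post, weights, f_best))]
```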
Max-Value Entropy Search We derive a ColaBO-MES acquisition function by first considering the definition of the entropy, $\mathrm{H}[p(y_{\mathbf{x}}|\mathcal{D})] = \mathbb{E}_{y_{\mathbf{x}}|\mathcal{D}}[-\log p(y_{\mathbf{x}}|\mathcal{D})]$ . When considering the belief-weighted posterior, we further condition the posterior on $\rho$ and obtain
$$
\alpha_{\mathrm{MES}}(\boldsymbol{x}) = \mathbb{E}_{f_* | \mathcal{D}, \rho}\left[\mathbb{E}_{y_{\boldsymbol{x}} | \mathcal{D}, \rho, f_*}\left[\log p(y_{\boldsymbol{x}} \,|\, \mathcal{D}, \rho, f_*)\right]\right] - \mathbb{E}_{y_{\boldsymbol{x}} | \mathcal{D}, \rho}\left[\log p(y_{\boldsymbol{x}} \,|\, \mathcal{D}, \rho)\right] \tag{13}
$$

$$
\propto \mathbb{E}_{f_* | \mathcal{D}, \rho}\left[\mathbb{E}_{f_{\boldsymbol{x}} | \mathcal{D}, \rho}\left[\mathbb{E}_{y_{\boldsymbol{x}} | f_{\boldsymbol{x}}}\left[\log p\left(y_{\boldsymbol{x}} \,|\, f_{\boldsymbol{x}}, \rho, f_*\right)\right]\right]\right] - \mathbb{E}_{f_{\boldsymbol{x}} | \mathcal{D}, \rho}\left[\mathbb{E}_{y_{\boldsymbol{x}} | f_{\boldsymbol{x}}}\left[\log p\left(y_{\boldsymbol{x}} \,|\, f_{\boldsymbol{x}}, \rho\right)\right]\right] \tag{14}
$$

$$
\approx \frac{1}{Z_J} \sum_{j=1}^{J} \sum_{\ell=1}^{L} \sum_{k=1}^{K} \log p\left(y_{\boldsymbol{x}}^{(k)} \,|\, f_{\boldsymbol{x}}^{(\ell)}, f_*^{(j)}\right) \rho\left(f^{(\ell)}\right) \rho\left(f^{(j)}\right) - \sum_{\ell=1}^{L} \sum_{k=1}^{K} \log p\left(y_{\boldsymbol{x}}^{(k)} \,|\, f_{\boldsymbol{x}}^{(\ell)}\right) \rho\left(f^{(\ell)}\right), \tag{15}
$$
where $Z_J = \sum_{j} \rho(f^{(j)})$ is a normalizing constant brought on by sampling optimal values, $y_{\boldsymbol{x}} | f_{\boldsymbol{x}}$ can trivially be obtained by adding Gaussian noise $\varepsilon \sim \mathcal{N}(0, \sigma_\varepsilon^2)$ to a noiseless sample $f_{\boldsymbol{x}} | \mathcal{D}$ in the innermost expectation, and $f_{\boldsymbol{x}}$ and $f_*$ are obtained through the pathwise sampling procedure outlined in Eq. 8. The samples are evaluated under the densities $p(y_{\boldsymbol{x}} | f_{\boldsymbol{x}})$ and $p(y_{\boldsymbol{x}} | f_{\boldsymbol{x}}, f_*)$. As is evident from Eq. 15, $\rho$ affects the posterior distribution of both the observations $y_{\boldsymbol{x}}$ and the optimal values $f_*$. PES and JES are derived analogously; however, these acquisition functions require conditioning on additional, simulated data, and consequently additional pathwise updates, to compute.
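A rough sketch of the weighted estimate in Eq. 15 at a single candidate $\boldsymbol{x}$; it is our simplification, not the paper's code, and in particular it handles the conditioning on $f_*^{(j)}$ by crudely masking function samples whose maximum exceeds the sampled optimum, where a faithful implementation would use a truncated predictive density.

```python
import numpy as np
from scipy.stats import norm

def colabo_mes_at_x(fx, fmax, w, fstar, wstar, sigma, K=16, rng=None):
    """fx: (L,) posterior samples f_x^{(l)} at x; fmax: (L,) each sample's global
    max; w, wstar: belief weights rho(f^{(l)}), rho(f^{(j)}); sigma: noise std."""
    rng = rng or np.random.default_rng(0)
    y = fx[None, :] + rng.normal(0.0, sigma, size=(K, len(fx)))  # y_x^{(k)} | f_x^{(l)}
    logp = norm.logpdf(y, loc=fx[None, :], scale=sigma)          # log p(y | f_x^{(l)})
    second = (logp * w[None, :]).sum()                           # second sum in Eq. 15
    first = 0.0
    for fj, wj in zip(fstar, wstar):
        mask = (fmax <= fj).astype(float)                        # crude f_* conditioning
        first += wj * (logp * (w * mask)[None, :]).sum()
    return first / wstar.sum() - second                          # 1/Z_J weighting
```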
# 3.3 PRACTICAL CONSIDERATIONS
ColaBO introduces additional flexibility to MC-based BO acquisition functions. The ColaBO framework deviates from vanilla (q-)MC acquisition functions (Wilson et al., 2017; Balandat et al., 2020) by utilizing approximate sample functions from the posterior, as opposed to pointwise draws from the posterior predictive and the reparametrization trick (Kingma & Welling, 2014; Rezende et al., 2014). ColaBO has three shortcomings not present in vanilla MC acquisition functions: (1) it cannot utilize Quasi-MC in the draws from the predictive posterior (only in the RFF weights),
Algorithm 1 ColaBO iteration

1: Input: User prior $\rho$, number of function samples $L$, current data $\mathcal{D}$
2: Output: Next query location $\pmb{x}'$.
3: for $\ell \in \{1, \dots, L\}$ do
4: $\rho^{(\ell)} = \rho(\tilde{f}^{(\ell)})$, $\tilde{f}^{(\ell)} \sim p(\tilde{f})$
5: $(\tilde{f}^{(\ell)}|\mathcal{D}) = \text{PathwiseUpdate}(\tilde{f}^{(\ell)}, \mathcal{D})$
6: end for
7: $p(\tilde{f}|\mathcal{D}, \rho) \approx \sum_{\ell} \rho^{(\ell)}(\tilde{f}^{(\ell)}|\mathcal{D})$
8: $\pmb{x}' = \arg \max_{\pmb{x} \in \mathcal{X}} \mathbb{E}_{p(\tilde{f}|\mathcal{D}, \rho)}[u(\tilde{f}_{\boldsymbol{x}})]$ ▷ Maximize MC acquisition
(2) it cannot fix the base samples (Balandat et al., 2020) drawn from the posterior for acquisition function consistency across the search space, and (3) the RFF approximation of $p(f)$ introduces bias. This approximation error is substantially more pronounced for the Matérn-5/2 kernel than for the squared exponential, leaving ColaBO best suited for the latter. In Sec. 4.1, we empirically demonstrate the impact of these shortcomings. While acquisition function optimization no longer enjoys the improved accuracy resulting from reparametrization, it can still benefit from the fact that ColaBO backpropagates through quantities computed as sums of smooth functions.
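A self-contained toy rendering of one ColaBO iteration (Algorithm 1) on a 1D grid, under the same simplifying assumptions as the earlier snippets (grid-based paths instead of RFFs, an RBF kernel, and a hypothetical belief $\rho$ favoring an arg max near 0.8); it is an illustration of the control flow, not the authors' implementation:

```python
import numpy as np

rng = np.random.default_rng(0)
grid = np.linspace(0.0, 1.0, 200)[:, None]

def rbf(A, B, ls=0.1):
    return np.exp(-0.5 * (A - B.T) ** 2 / ls**2)

# Data, noise, and a belief rho favoring functions whose arg max lies near 0.8.
Xn, y, s2 = np.array([[0.2], [0.5]]), np.array([0.1, -0.3]), 1e-2
rho = lambda f: np.exp(-0.5 * (grid[np.argmax(f), 0] - 0.8) ** 2 / 0.05**2)

# Lines 3-4: draw prior paths jointly on the grid and Xn, compute weights rho^(l).
Xall = np.vstack([grid, Xn])
Kall = rbf(Xall, Xall) + 1e-8 * np.eye(len(Xall))
paths = rng.multivariate_normal(np.zeros(len(Xall)), Kall, size=512)
w = np.array([rho(f[: len(grid)]) for f in paths])

# Line 5: pathwise update (Eq. 8) of every path on the grid.
A_inv = np.linalg.inv(rbf(Xn, Xn) + s2 * np.eye(len(Xn)))
eps = rng.normal(0.0, np.sqrt(s2), size=(len(paths), len(Xn)))
resid = y[None, :] - paths[:, len(grid):] - eps
post = paths[:, : len(grid)] + resid @ A_inv @ rbf(grid, Xn).T

# Lines 7-8: weighted MC-EI over the grid and its maximizer x'.
f_best = y.min()                                  # minimization convention of Eq. 12
ei = (w[:, None] * np.maximum(f_best - post, 0.0)).sum(axis=0)
print(f"next query x' = {grid[np.argmax(ei), 0]:.3f}")
```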
# 4 RESULTS
We evaluate the performance of ColaBO on various tasks, using priors over the optimum $\pi_{x_*}$ obtained from known optima on synthetic tasks, as well as from prior work (Mallik et al., 2023) on realistic tasks. We consider two variants of ColaBO: one using LogEI (Ament et al., 2023), a numerically stable, smoothed logsumexp transformation of EI with analogous derivation, and one variant using MES. We benchmark against the vanilla variants of each acquisition function, as well as $\pi$BO (Hvarfner et al., 2022b) and decoupled Thompson sampling (Thompson, 1933; Wilson et al., 2020). All acquisition functions are implemented in BoTorch (Balandat et al., 2020) using a squared exponential kernel and MAP hyperparameter estimation. We present experiments with a Matérn-5/2 (Matérn, 1960) kernel in App. C.1. Unless stated otherwise, all methods are initialized with the mode of the prior followed by 2 Sobol samples. The experimental setup is outlined in Appendix B, and our code is publicly available at https://github.com/hvarfner/colabo.
# 4.1 APPROXIMATION QUALITY OF THE COLABO FRAMEWORK
Firstly, we demonstrate the approximation quality of ColaBO without user priors to assess its accuracy compared to a vanilla MC acquisition function. To facilitate comparison, we randomly sample 10 points on the Hartmann (3D) function, and optimize LogEI with a large budget. We subsequently optimize ColaBO-LogEI on the same set of points and compare the arg max to the solution found by the gold standard. Figure 4 displays the (log10) Euclidean distance between the arg max of LogEI and its ColaBO variant. We note that, for small numbers ($\leqslant 256$) of posterior samples, the error induced by RFF bias is relatively low, which is evidenced by all RFF variants being roughly equal in distance to the true acquisition function optimizer.

Figure 4: Mean and $1/4$ standard deviation of MC-induced errors of ColaBO-LogEI relative to vanilla LogEI, as measured by the distance to the arg max of the acquisition function on Hartmann (3D) on 10 randomly sampled points for 40 seeds.
# 4.2 SYNTHETIC FUNCTIONS WITH KNOWN PRIORS
We adopt an evaluation protocol similar to that of Hvarfner et al. (2022b), and evaluate ColaBO for two types of user beliefs on synthetic tasks: well-located and poorly located priors over the optimal location, designed to emulate a well-informed and poorly informed practitioner, respectively. The well-located prior is offset by a small ($10\%$) amount from the optimum, and the poorly located prior is maximally offset, while retaining its mode inside the search space. Complete details on the priors can be found

Figure 5: Performance on synthetic functions with well-located priors. Both ColaBO-LogEI and ColaBO-MES offer drastic speed-ups over their vanilla variants, and offer similar performance to $\pi$BO. The ranking of ColaBO acquisition functions is generally consistent with that of their respective vanilla variants. This is most prominent on Rosenbrock (6D), where ColaBO-MES struggles similarly to vanilla MES.

Figure 6: Performance on poorly located priors. ColaBO acquisition functions are more robust than $\pi$BO, as they frequently recover the performance of the vanilla acquisition function before the total budget is depleted. ColaBO-LogEI struggles marginally on Hartmann (6D). ColaBO-MES recovers the baseline on all tasks.
in Appendix B.3. On well-located priors, both ColaBO-LogEI and ColaBO-MES demonstrate substantially improved performance relative to their vanilla counterparts, comparable to $\pi$BO on all benchmarks. On poorly located priors, ColaBO demonstrates superior robustness, recovering the performance of the vanilla acquisition function within the maximal budget of $20D$ iterations and clearly outperforming $\pi$BO, which is more frequently misled by the poor prior. In Appendix C.2, we also demonstrate ColaBO utilizing (accurate) beliefs over the optimal value: similarly to Figure 5, ColaBO yields increased efficiency relative to baselines, albeit not as substantial. Moreover, we demonstrate its usage with batch evaluations on well-located priors in Sec. C.3, showing that the drop in performance from batching evaluations is marginal at worst.
# 4.3 HYPERPARAMETER TUNING TASKS
Lastly, we evaluate ColaBO on three $4D$ deep learning HPO tasks from the PD1 (Wang et al., 2023) benchmarking suite. While the optima for these tasks are ultimately unknown, we utilize the priors provided in MF-Prior-Bench (Mallik et al., 2023), which are intended to provide a good starting point for further optimization. To emulate a realistic HPO setting, we consider a smaller optimization budget of $10D$ iterations, and initialize all methods that utilize user beliefs with only one initial sample, that being the mode of the prior. The two ColaBO variants perform best in this evaluation, producing the best terminal performance on two tasks (CIFAR, LM1B), with all methods being tied on the third (WMT). ColaBO demonstrates consistent speed-ups compared to its vanilla counterparts, surpassing the terminal performance of the baseline within a third of the budget on CIFAR and LM1B. In App. A, we benchmark on 5 tasks from LCBench (Zimmer et al., 2020), displaying similar performance.
# 5 RELATED WORK
In BO, auxiliary prior information can be conveyed in multiple ways. We outline meta learning/transfer learning for BO based on data from previous experiments, and data-less approaches.
Learning from Previous Experiments Transfer learning and meta learning for BO aims to automatically extract and use knowledge from prior executions of BO by pre-training the model on

Figure 7: Performance on the 4D PD1 hyperparameter tuning tasks of various deep learning pipelines. ColaBO drastically accelerates optimization initially, finding configurations with close to terminal performance quickly. $\pi$ BO offers competitive performance, but lacks the rapid initial progress of ColaBO on CIFAR and LM1B.
data acquired from previous executions (Swersky et al., 2013; Wistuba et al., 2015; Perrone et al., 2019; Feurer et al., 2015; 2018; Rothfuss et al., 2021a;b; Wistuba & Grabocka, 2021; Feurer et al., 2022). Typically, meta- and transfer learning exploit relevant previous data for training the GP for the current task while retaining predictive uncertainty to account for imperfect task correlation.
Expert Priors over Function Optimum Few previous works have proposed to inject explicit prior distributions over the location of an optimum into BO. In these cases, users explicitly define a prior that encodes their beliefs on where the optimum is more likely to be located. Bergstra et al. (2011a) suggest an approach that supports prior beliefs from a fixed set of distributions, which affects the very initial stage of optimization. However, this approach cannot be combined with standard acquisition functions. BOPrO (Souza et al., 2021) employs a similar structure that combines the user-provided prior distribution with a data-driven model into a pseudo-posterior. From the pseudo-posterior, configurations are selected using the EI acquisition function, using the formulation in Bergstra et al. (2011a). $\pi$ BO (Hvarfner et al., 2022b) suggests a general-purpose prior-weighted acquisition function, where the influence of the prior decreases over time. They provide convergence guarantees for when the framework is applied to the EI acquisition function. While effective, none of these approaches act on the surrogate model in a Bayesian-principled fashion, but strictly as heuristics. Moreover, they solely focus on priors over optimal inputs, thus offering less utility than ColaBO.
Priors over Optimal Value Similarly few works have addressed the issue of auxiliary knowledge of the optimal value. Both Jeong & Kim (2021) and Nguyen & Osborne (2020) propose altering the GP and accompanying it with tailored acquisition functions. Jeong & Kim (2021) employ variational inference, proposing distinct variational families depending on the type of knowledge pertaining to the optimal value. Nguyen & Osborne (2020) use a parabolic transformation of the output space to ensure an upper bound is preserved. Unlike ColaBO, neither of these methods is general enough to accompany arbitrary user priors to guide the optimization.
# 6 CONCLUSION, LIMITATIONS AND FUTURE WORK
We presented ColaBO, a flexible BO framework that allows practitioners to inject beliefs over function properties in a Bayesian-principled manner, allowing for increased efficiency in the BO procedure. ColaBO works across a collection of MC acquisition functions, inheriting their flexibility in batch optimization and ability to work with non-Gaussian posteriors. It demonstrates competitive performance for well-located priors, using them to substantially accelerate optimization. Moreover, it retains approximately baseline performance when applied to detrimental priors, demonstrating greater robustness than $\pi$BO. ColaBO crucially relies on multiple steps of MC. While flexible, this approach drives computational expense in order to ensure sufficient accuracy, requiring tens of seconds per evaluation depending on the size of the benchmark. Moreover, obtaining draws from $\rho_{x}^{*}$ scales exponentially in the dimensionality of the prior. While practitioners are unlikely to specify priors over more than a handful of variables, ColaBO may become impractical when priors of higher dimensionality are employed. Paths for future work could involve more accurate and efficient sampling procedures (Lin et al., 2023) from the belief-weighted prior, as well as variational (Titsias, 2009) or pre-trained (Müller et al., 2022; 2023) approaches to obtain a representative belief-biased model with an analytical posterior. This would likely bring down the runtime of ColaBO and broaden its potential use. Lastly, applying ColaBO to multi-fidelity optimization (Kandasamy et al., 2016; Mallik et al., 2023) offers an additional avenue for increased efficiency, which would further increase its viability on costly deep learning pipelines.
# ACKNOWLEDGEMENTS
We thank the anonymous reviewers for their valuable contributions. Luigi Nardi was supported in part by affiliate members and other supporters of the Stanford DAWN project — Ant Financial, Facebook, Google, Intel, Microsoft, NEC, SAP, Teradata, and VMware. Carl Hvarfner, Erik Hellsten and Luigi Nardi were partially supported by the Wallenberg AI, Autonomous Systems and Software Program (WASP) funded by the Knut and Alice Wallenberg Foundation. Luigi Nardi was partially supported by the Wallenberg Launch Pad (WALP) grant Dnr 2021.0348. Frank Hutter acknowledges support through TAILOR, a project funded by the EU Horizon 2020 research and innovation programme under GA No 952215, by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under grant number 417962828, by the state of Baden-Württemberg through bwHPC and the German Research Foundation (DFG) through grant no INST 39/963-1 FUGG, and by the European Research Council (ERC) Consolidator Grant “Deep Learning 2.0” (grant no. 101045765). The computations were also enabled by resources provided by the Swedish National Infrastructure for Computing (SNIC) at LUNARC partially funded by the Swedish Research Council through grant agreement no. 2018-05973. Funded by the European Union. Views and opinions expressed are however those of the author(s) only and do not necessarily reflect those of the European Union or the ERC. Neither the European Union nor the ERC can be held responsible for them.
# REFERENCES
Sebastian Ament, Samuel Daulton, David Eriksson, Maximilian Balandat, and Eytan Bakshy. Unexpected improvements to expected improvement for bayesian optimization. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=1vyAG6j9PE.
Raul Astudillo and Peter Frazier. Bayesian optimization of function networks. Advances in neural information processing systems, 34:14463-14475, 2021.
M. Balandat, B. Karrer, D. R. Jiang, S. Daulton, B. Letham, A. G. Wilson, and E. Bakshy. Botorch: A framework for efficient monte-carlo bayesian optimization. In Advances in Neural Information Processing Systems, 2020. URL http://arxiv.org/abs/1910.06403.
J. Bergstra, R. Bardenet, Y. Bengio, and B. Kégl. Algorithms for hyper-parameter optimization. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, and K. Weinberger (eds.), Proceedings of the 25th International Conference on Advances in Neural Information Processing Systems (NeurIPS'11), pp. 2546-2554, 2011a.
James Bergstra, Rémi Bardenet, Yoshua Bengio, and Balázs Kégl. Algorithms for Hyper-Parameter Optimization. In Advances in Neural Information Processing Systems (NeurIPS), volume 24. Curran Associates, Inc., 2011b.
E. Brochu, V. Cora, and N. de Freitas. A tutorial on Bayesian optimization of expensive cost functions, with application to active user modeling and hierarchical reinforcement learning. arXiv:1012.2599v1 [cs.LG], 2010.
Adam D. Bull. Convergence rates of efficient global optimization algorithms. Journal of Machine Learning Research, 12:2879-2904, 2011.
R. Calandra, N. Gopalan, A. Seyfarth, J. Peters, and M. Deisenroth. Bayesian gait optimization for bipedal locomotion. In P. Pardalos and M. Resende (eds.), Proceedings of the Eighth International Conference on Learning and Intelligent Optimization (LION'14), 2014.
Adel Ejeh, Leon Medvinsky, Aaron Councilman, Hemang Nehra, Suraj Sharma, Vikram Adve, Luigi Nardi, Eriko Nurvitadhi, and Rob A Rutenbar. Hpvm2fpga: Enabling true hardware-agnostic fpga programming. In Proceedings of the 33rd IEEE International Conference on Application-specific Systems, Architectures, and Processors, 2022.
David Eriksson, Michael Pearce, Jacob Gardner, Ryan D Turner, and Matthias Poloczek. Scalable global optimization via local Bayesian optimization. In Advances in Neural Information Processing Systems, pp. 5496-5507, 2019. URL http://papers.nips.cc/paper/8788-scalable-global-optimization-via-local-bayesian-optimization.pdf.
M. Feurer, Jost Tobias Springenberg, and F. Hutter. Initializing bayesian hyperparameter optimization via meta-learning. In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, pp. 1128-1135, 2015.
M. Feurer, B. Letham, F. Hutter, and E. Bakshy. Practical transfer learning for bayesian optimization. ArXiv abs/1802.02219, 2018.
Matthias Feurer, Benjamin Letham, Frank Hutter, and Eytan Bakshy. Practical transfer learning for Bayesian optimization. arXiv preprint 1802.02219, 2022.
Peter Frazier, Warren Powell, and Savas Dayanik. The knowledge-gradient policy for correlated normal beliefs. INFORMS journal on Computing, 21(4):599-613, 2009.
R. Garnett. Bayesian Optimization. Cambridge University Press, 2022. Available for free at https://bayesoptbook.com/.
Ryan-Rhys Griffiths and José Miguel Hernández-Lobato. Constrained bayesian optimization for automatic chemical design using variational autoencoders. Chemical Science, 2020.
P. Hennig and C. J. Schuler. Entropy search for information-efficient global optimization. Journal of Machine Learning Research, 13(1):1809-1837, June 2012. ISSN 1532-4435.
J. M. Hernández-Lobato, M. W. Hoffman, and Z. Ghahramani. Predictive entropy search for efficient global optimization of black-box functions. In Advances in Neural Information Processing Systems, 2014. URL https://proceedings.neurips.cc/paper/2014/file/069d3bb002acd8d7dd095917f9efe4cb-Paper.pdf.
Jose Miguel Hernández-Lobato, Michael Gelbart, Matthew Hoffman, Ryan Adams, and Zoubin Ghahramani. Predictive entropy search for bayesian optimization with unknown constraints. In International conference on machine learning, pp. 1699-1707. PMLR, 2015.
Daolang Huang, Louis Filstroff, Petrus Mikkola, Runkai Zheng, and Samuel Kaski. Bayesian optimization augmented with actively elicited expert knowledge, 2022.
F. Hutter, H. Hoos, and K. Leyton-Brown. Sequential model-based optimization for general algorithm configuration. In C. Coello (ed.), Proceedings of the Fifth International Conference on Learning and Intelligent Optimization (LION'11), volume 6683, pp. 507-523, 2011.
Carl Hvarfner, Frank Hutter, and Luigi Nardi. Joint entropy search for maximally-informed bayesian optimization. In Proceedings of the 36th International Conference on Neural Information Processing Systems, 2022a.
Carl Hvarfner, Danny Stoll, Artur Souza, Marius Lindauer, Frank Hutter, and Luigi Nardi. PiBO: Augmenting Acquisition Functions with User Beliefs for Bayesian Optimization. In International Conference on Learning Representations, 2022b.
Carl Hvarfner, Erik Hellsten, Frank Hutter, and Luigi Nardi. Self-correcting bayesian optimization through bayesian active learning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=dX9MjUtP1A.
Taewon Jeong and Heeyoung Kim. Objective bound conditional gaussian process for bayesian optimization. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pp. 4819-4828. PMLR, 18-24 Jul 2021. URL https://proceedings.mlr.press/v139/jeong21a.html.
D. Jones, M. Schonlau, and W. Welch. Efficient global optimization of expensive black-box functions. Journal of Global Optimization, 13:455-492, 12 1998. doi: 10.1023/A:1008306431147.
A G Journel and C J Huijbregts. Mining geostatistics, Jan 1976.
K. Kandasamy, G. Dasarathy, J. Oliva, J. Schneider, and B. Poczos. Gaussian Process Bandit Optimisation with Multi-fidelity Evaluations. In D. Lee, M. Sugiyama, U. von Luxburg, I. Guyon, and R. Garnett (eds.), Proceedings of the 30th International Conference on Advances in Neural Information Processing Systems (NeurIPS'16), pp. 992-1000, 2016.
K. Kandasamy, A. Krishnamurthy, J. Schneider, and B. Poczos. Parallelised Bayesian optimisation via Thompson sampling. In A. Storkey and F Perez-Cruz (eds.), Proceedings of the 21st International Conference on Artificial Intelligence and Statistics (AISTATS), volume 84, pp. 133-142. Proceedings of Machine Learning Research, 2018.
Diederik P Kingma and Max Welling. Auto-encoding variational bayes, 2014. URL https://arxiv.org/abs/1312.6114.
Arun Kumar, Santu Rana, Alistair Shilton, and Svetha Venkatesh. Human-ai collaborative bayesian optimisation. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems, volume 35, pp. 16233-16245. Curran Associates, Inc., 2022. URL https://proceedings.neurips.cc/paper_files/paper/2022/file/6751611b394a3464cea53eed91cf163c-Paper-Conference.pdf.
H. J. Kushner. A New Method of Locating the Maximum Point of an Arbitrary Multipeak Curve in the Presence of Noise. Journal of Basic Engineering, 86(1):97-106, 03 1964. ISSN 0021-9223. doi: 10.1115/1.3653121. URL https://doi.org/10.1115/1.3653121.
B. Letham, B. Karrer, G. Ottoni, and E. Bakshy. Constrained Bayesian optimization with noisy experiments. Bayesian Analysis, 2018.
Jihao Andreas Lin, Javier Antorán, Shreyas Padhy, David Janz, José Miguel Hernández-Lobato, and Alexander Terenin. Sampling from gaussian process posteriors using stochastic gradient descent. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=Sf9goJtTCE.
Marius Lindauer, Katharina Eggensperger, Matthias Feurer, André Biedenkapp, Difan Deng, Carolin Benjamins, Tim Ruhkopf, René Sass, and Frank Hutter. Smac3: A versatile bayesian optimization package for hyperparameter optimization. Journal of Machine Learning Research, 23(54):1-9, 2022. URL http://jmlr.org/papers/v23/21-0888.html.
Neeratyoy Mallik, Edward Bergman, Carl Hvarfner, Danny Stoll, Maciej Janowski, Marius Lindauer, Luigi Nardi, and Frank Hutter. Priorband: Practical hyperparameter optimization in the age of deep learning. arXiv preprint 2306.12370, 2023.
B. Matérn. Spatial variation. Meddelanden fran Statens Skogsforskningsinstitut, 1960.
Matthias Mayr, Carl Hvarfner, Konstantinos Chatzilygeroudis, Luigi Nardi, and Volker Krueger. Learning skill-based industrial robot tasks with user priors. IEEE 18th International Conference on Automation Science and Engineering, 2022. URL https://arxiv.org/abs/2208.01605.
J. Mockus, V. Tiesis, and A. Zilinskas. The application of Bayesian methods for seeking the extremum. Towards Global Optimization, 2(117-129):2, 1978.
Henry B. Moss, David S. Leslie, Javier Gonzalez, and Paul Rayson. Gibbon: General-purpose information-based bayesian optimisation. Journal of Machine Learning Research, 22(235):1-49, 2021. URL http://jmlr.org/papers/v22/21-0120.html.
Samuel Müller, Noah Hollmann, Sebastian Pineda Arango, Josif Grabocka, and Frank Hutter. Transformers can do bayesian inference. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=KSugKcbNf9.
Samuel Müller, Matthias Feurer, Noah Hollmann, and Frank Hutter. PFNs4BO: In-context learning for Bayesian optimization. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pp. 25444-25470. PMLR, 23-29 Jul 2023. URL https://proceedings.mlr.press/v202/muller23a.html.
Mojmir Mutny and Andreas Krause. Efficient high dimensional bayesian optimization with additivity and quadrature fourier features. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. URL https://proceedings.neurips.cc/paper_files/paper/2018/file/4e5046fc8d6a97d18a5f54beaed54dea-Paper.pdf.
L. Nardi, D. Koeplinger, and K. Olukotun. Practical design space exploration. In 2019 IEEE 27th International Symposium on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems (MASCOTS), pp. 347-358. IEEE, 2019.
Willie Neiswanger, Ke Alexander Wang, and Stefano Ermon. Bayesian algorithm execution: Estimating computable properties of black-box functions using mutual information. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pp. 8005-8015. PMLR, 18-24 Jul 2021. URL https://proceedings.mlr.press/v139/neiswanger21a.html.
Vu Nguyen and Michael A. Osborne. Knowing the what but not the where in Bayesian optimization. In Hal Daume III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 7317-7326. PMLR, 13-18 Jul 2020. URL https://proceedings.mlr.press/v119/nguyen20d.html.
C. Oh, E. Gavves, and M. Welling. BOCK: Bayesian optimization with cylindrical kernels. In International Conference on Machine Learning, pp. 3865-3874, 2018.
V. Perrone, H. Shen, M. Seeger, C. Archambeau, and R. Jenatton. Learning search spaces for bayesian optimization: Another view of hyperparameter transfer learning. In Advances in Neural Information Processing Systems, 2019.
Ali Rahimi and Benjamin Recht. Random features for large-scale kernel machines. In J. Platt, D. Koller, Y. Singer, and S. Roweis (eds.), Advances in Neural Information Processing Systems, volume 20. Curran Associates, Inc., 2007. URL https://proceedings.neurips.cc/paper_files/paper/2007/file/013a006f03dbc5392effeb8f18fda755-Paper.pdf.
C. Rasmussen and C. Williams. Gaussian Processes for Machine Learning. The MIT Press, 2006.
Danilo Jimenez Rezende, Shakir Mohamed, and Daan Wierstra. Stochastic backpropagation and approximate inference in deep generative models. In Eric P. Xing and Tony Jebara (eds.), Proceedings of the 31st International Conference on Machine Learning, volume 32 of Proceedings of Machine Learning Research, pp. 1278-1286, Beijing, China, 22-24 Jun 2014. PMLR. URL https://proceedings.mlr.press/v32/rezende14.html.
Jonas Rothfuss, Vincent Fortuin, Martin Josifoski, and Andreas Krause. Pacoh: Bayes-optimal meta-learning with pac-guarantees. In Proceedings of the 38th International Conference on Machine Learning, pp. 9116-9126, 2021a.
Jonas Rothfuss, Dominique Heyn, Jinfan Chen, and Andreas Krause. Meta-learning reliable priors in the function space. In Advances in Neural Information Processing Systems, volume 34, 2021b.
Binxin Ru, Xingchen Wan, Xiaowen Dong, and Michael Osborne. Interpretable neural architecture search via bayesian optimisation with weisfeiler-lehman kernels. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=j9Rv7qXdXjd.
B. Shahriari, K. Swersky, Z. Wang, R. Adams, and N. de Freitas. Taking the human out of the loop: A review of Bayesian optimization. Proceedings of the IEEE, 104(1):148-175, 2016.
L. Smith. A disciplined approach to neural network hyper-parameters: Part 1-learning rate, batch size, momentum, and weight decay. arXiv preprint arXiv:1803.09820, 2018.
J. Snoek, H. Larochelle, and R. Adams. Practical Bayesian optimization of machine learning algorithms. In P. Bartlett, F. Pereira, C. Burges, L. Bottou, and K. Weinberger (eds.), Proceedings of the 26th International Conference on Advances in Neural Information Processing Systems (NeurIPS'12), pp. 2960-2968, 2012.
A. Souza, L. Nardi, L. Oliveira, K. Olukotun, M. Lindauer, and F. Hutter. Bayesian optimization with a prior for the optimum. In Machine Learning and Knowledge Discovery in Databases. Research Track - European Conference, ECML PKDD 2021, Bilbao, Spain, September 13-17, 2021, Proceedings, Part III, volume 12977 of Lecture Notes in Computer Science, pp. 265-296. Springer, 2021.
N. Srinivas, A. Krause, S. M. Kakade, and M. W. Seeger. Information-theoretic regret bounds for gaussian process optimization in the bandit setting. IEEE Transactions on Information Theory, 58(5):3250-3265, May 2012. ISSN 1557-9654. doi: 10.1109/tit.2011.2182033. URL http://dx.doi.org/10.1109/TIT.2011.2182033.
K. Swersky, J. Snoek, and R. Adams. Multi-task Bayesian optimization. In C. Burges, L. Bottou, M. Welling, Z. Ghahramani, and K. Weinberger (eds.), Proceedings of the 27th International Conference on Advances in Neural Information Processing Systems (NeurIPS'13), pp. 2004-2012, 2013.
W. Thompson. On the likelihood that one unknown probability exceeds another in view of the evidence of two samples. Biometrika, 25(3/4):285-294, 1933.
Michalis Titsias. Variational learning of inducing variables in sparse gaussian processes. In David van Dyk and Max Welling (eds.), Proceedings of the Twelfth International Conference on Artificial Intelligence and Statistics, volume 5 of Proceedings of Machine Learning Research, pp. 567-574, Hilton Clearwater Beach Resort, Clearwater Beach, Florida USA, 16-18 Apr 2009. PMLR. URL https://proceedings.mlr.press/v5/titsias09a.html.
Ben Tu, Axel Gandy, Nikolas Kantas, and Behrang Shafei. Joint entropy search for multi-objective bayesian optimization. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=ZChgD8OoGds.
Q. Wang, Y. Ming, Z. Jin, Q. Shen, D. Liu, M. J. Smith, K. Veeramachaneni, and H. Qu. Atmseer: Increasing transparency and controllability in automated machine learning. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, CHI '19, pp. 1-12. Association for Computing Machinery, 2019.
Zi Wang and Stefanie Jegelka. Max-value entropy search for efficient bayesian optimization. In International Conference on Machine Learning (ICML), 2017.
Zi Wang, Clement Gehring, Pushmeet Kohli, and Stefanie Jegelka. Batched large-scale bayesian optimization in high-dimensional spaces. In Amos Storkey and Fernando Perez-Cruz (eds.), Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics, volume 84 of Proceedings of Machine Learning Research, pp. 745-754. PMLR, 09-11 Apr 2018. URL https://proceedings.mlr.press/v84/wang18c.html.
Zi Wang, George E. Dahl, Kevin Swersky, Chansoo Lee, Zachary Nado, Justin Gilmer, Jasper Snoek, and Zoubin Ghahramani. Pre-trained Gaussian processes for Bayesian optimization. arXiv preprint arXiv:2109.08215, 2023.
C. White, W. Neiswanger, and Y. Savani. BANANAS: Bayesian optimization with neural architectures for neural architecture search. In Q. Yang, K. Leyton-Brown, and Mausam (eds.), Proceedings of the Thirty-Fifth Conference on Artificial Intelligence (AAAI'21), pp. 10293-10301. Association for the Advancement of Artificial Intelligence, AAAI Press, 2021.
James Wilson, Frank Hutter, and Marc Deisenroth. Maximizing acquisition functions for bayesian optimization. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. URL https://proceedings.neurips.cc/paper/2018/file/498f2c21688f6451d9f5fd09d53edda7-Paper.pdf.
James T. Wilson, Riccardo Moriconi, Frank Hutter, and Marc Peter Deisenroth. The reparameterization trick for acquisition functions, 2017. URL https://arxiv.org/abs/1712.00424.
James T. Wilson, Viacheslav Borovitskiy, Alexander Terenin, Peter Mostowsky, and Marc Peter Deisenroth. Efficiently sampling functions from gaussian process posteriors. In International Conference on Machine Learning, 2020. URL https://arxiv.org/abs/2002.09309.
M. Wistuba, N. Schilling, and L. Schmidt-Thieme. Hyperparameter search space pruning - A new component for sequential model-based hyperparameter optimization. In A. Appice, P. Rodrigues, V. Costa, J. Gama, A. Jorge, and C. Soares (eds.), Machine Learning and Knowledge Discovery in Databases (ECML/PKDD'15), volume 9285, pp. 104-119, 2015.
Martin Wistuba and Josif Grabocka. Few-shot bayesian optimization with deep kernel surrogates. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=bJxgv5C3sYc.
Lucas Zimmer, Marius Thomas Lindauer, and Frank Hutter. Auto-pytorch tabular: Multi-fidelity metalearning for efficient and robust autodl. ArXiv, abs/2006.13799, 2020. URL https://api.semanticscholar.org/CorpusID:220041844.

Figure 8: Performance on the 6D LCBench hyperparameter tuning tasks of various deep learning pipelines. ColaBO substantially improves on the non-prior baselines on three out of five tasks. $\pi$BO performs best on aggregate, and achieves the best acceleration in performance at early iterations.
# A LCBENCH BENCHMARKING
We evaluate all methods on five deep learning tasks (6D) from the LCBench (Zimmer et al., 2020) suite, utilizing priors from MF-Prior-Bench. The chosen tasks were the five tasks with available priors of the best (good) strength, as per the benchmark suite. Figure 8 shows the performance of all methods on the LCBench tasks. ColaBO improves substantially on the baseline approaches for 3 out of 5 tasks. $\pi$ BO is the overall best-performing method, followed by ColaBO-LogEI.
# B EXPERIMENTAL SETUP
# B.1 MODEL
We outline the model used and the budget allocated to the various MC approximations involved with ColaBO. For all experiments, we utilize MAP estimation of the hyperparameters, and update the hyperparameters at every iteration of BO. All hyperparameters - lengthscale, outputscale and observation noise $(\theta = \{\ell, \sigma_{\varepsilon}^{2}, \sigma_{f}^{2}\})$ - are given a conventional $\mathcal{LN}(0,1)$ prior, applied on normalized inputs and standardized outputs. Furthermore, we fit the constant $c$ of the mean function, assigning it a $\mathcal{N}(0,1)$ prior as well. In Tab. 1, we display the parameters of the MC approximations for various tasks. No. $f$ is the maximal number of functions used in the MC computation of the acquisition function. No. Resamples is the number of initial posterior draws maximally used for the re-sampling of functions from the posterior $p(f|\rho)$. Lastly, No. $f_*$ is the number of optimal values used in the computation of ColaBO-MES.
<table><tr><td>Task</td><td>No. f</td><td>No. RFFs</td><td>No. Resamples</td><td>No. f*</td></tr><tr><td>Synthetic Good</td><td>768</td><td>2048</td><td>1.5 × 10<sup>5</sup></td><td>32</td></tr><tr><td>Synthetic Bad</td><td>768</td><td>2048</td><td>1.5 × 10<sup>5</sup></td><td>32</td></tr><tr><td>PD1</td><td>512</td><td>4096</td><td>2 × 10<sup>5</sup></td><td>32</td></tr><tr><td>Appendix</td><td>512</td><td>1024</td><td>10<sup>5</sup></td><td>32</td></tr></table>
Table 1: Budget-related parameters of the Monte Carlo approximations for all tasks.
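As a sketch of the surrogate described above, the following illustrates how such a MAP-estimated GP with $\mathcal{LN}(0,1)$ hyperparameter priors and a $\mathcal{N}(0,1)$ prior on the constant mean could be assembled in BoTorch/GPyTorch; it is our reconstruction from the stated choices, not the authors' code, and the API details (e.g. `fit_gpytorch_mll`) assume a recent BoTorch version.

```python
import torch
from botorch.fit import fit_gpytorch_mll
from botorch.models import SingleTaskGP
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.priors import LogNormalPrior, NormalPrior

train_X = torch.rand(10, 4, dtype=torch.double)      # normalized inputs
train_Y = torch.randn(10, 1, dtype=torch.double)     # standardized outputs

model = SingleTaskGP(
    train_X, train_Y,
    covar_module=ScaleKernel(
        RBFKernel(ard_num_dims=4, lengthscale_prior=LogNormalPrior(0.0, 1.0)),
        outputscale_prior=LogNormalPrior(0.0, 1.0),
    ),
    mean_module=ConstantMean(constant_prior=NormalPrior(0.0, 1.0)),
    likelihood=GaussianLikelihood(noise_prior=LogNormalPrior(0.0, 1.0)),
)
# With registered priors, maximizing the exact marginal log-likelihood
# corresponds to MAP estimation of the hyperparameters.
fit_gpytorch_mll(ExactMarginalLogLikelihood(model.likelihood, model))
```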
# B.2 BENCHMARKS
We outline the benchmarks used, their search spaces and the amount of synthetic noise added. When adding noise, we intend for the ratio of noise variance to total output range to be approximately equal across benchmarks.
# B.3 PRIORS
For synthetic benchmarks, the approximate optima of all included functions can be obtained in advance. Thus, the quality of a prior is known by construction. For a function of dimensionality $d$ with optimum at $\boldsymbol{x}_{*}$, the well-located prior is constructed by sampling an offset
<table><tr><td>Task</td><td>Dimensionality</td><td>σ<sub>ε</sub></td><td>Search space</td></tr><tr><td>Hartmann (4D)</td><td>4</td><td>0.25</td><td>[0, 1]<sup>D</sup></td></tr><tr><td>Levy (5D)</td><td>5</td><td>0.5</td><td>[-5, 5]<sup>D</sup></td></tr><tr><td>Hartmann (6D)</td><td>6</td><td>0.25</td><td>[0, 1]<sup>D</sup></td></tr><tr><td>Rosenbrock (6D)</td><td>6</td><td>5</td><td>[-2.048, 2.048]<sup>D</sup></td></tr><tr><td>Stybtang (7D)</td><td>7</td><td>1</td><td>[-4, 4]<sup>D</sup></td></tr></table>

Table 2: Benchmarks used for the Bayesian optimization experiments.
direction $\epsilon$ and scaling the offset by a dimensionality- and quality-specific term $c(d, q) = q\sqrt{d}$. For the well-located prior on synthetic tasks, we use $q = 0.1$, which implies that the prior mode is located $10\%$ of the distance across the search space away from the optimum, and construct a Gaussian prior as
$$
\pi_{\boldsymbol{x}_*}(\boldsymbol{x}) = \mathcal{N}\left(\boldsymbol{x}_* + c(d, q)\, \epsilon / \|\epsilon\|,\ \sigma_s\right), \quad \epsilon \sim \mathcal{N}(0, I), \tag{16}
$$
with $\sigma_s = 25\%$ for all tasks and prior qualities. For our 20 runs of the well-located prior, this procedure yields 20 unique priors per quality type, with identical offsets from the true optimum. No priors with a mode outside the search space were allowed; such priors were simply replaced. For the misinformed priors, we set $q = 1$, guaranteeing that the mode of the prior falls outside of the search space, and subsequently relocate it to the edge of the search space by its shortest path. Priors for all tasks are displayed in Tab. 3. For the PD1 tasks, the locations of the priors were obtained from MF-Prior-Bench (https://github.com/automl/mf-prior-bench). However, these priors require offsetting in order not to be so strong that they render subsequent BO obsolete. PD1 priors are provided in [0, 1]-normalized space for simplicity.
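A minimal sketch of this construction (Eq. 16), assuming a $[0, 1]^d$ search space and using the Hartmann (4D) optimum from Tab. 3 for illustration:

```python
import numpy as np

def make_prior_mode(x_opt, q=0.1, rng=None):
    """Sample a prior mode offset from the optimum by c(d, q) = q * sqrt(d),
    resampling until the mode lands inside the search space."""
    rng = rng or np.random.default_rng(0)
    d = len(x_opt)
    while True:
        eps = rng.normal(size=d)                          # offset direction
        mode = x_opt + q * np.sqrt(d) * eps / np.linalg.norm(eps)
        if np.all((mode >= 0.0) & (mode <= 1.0)):
            return mode                                   # pi_{x_*} = N(mode, sigma_s)

mode = make_prior_mode(np.array([0.19, 0.19, 0.56, 0.26]))  # Hartmann (4D)
```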
<table><tr><td>Task</td><td>Location</td><td>Offset, good</td><td>Offset, bad</td><td>σ<sub>s</sub></td></tr><tr><td>Hartmann (4D)</td><td>[0.19, 0.19, 0.56, 0.26]</td><td>0.1√D</td><td>max</td><td>0.25</td></tr><tr><td>Levy (5D)</td><td>[1]<sup>D</sup></td><td>1√D</td><td>max</td><td>2.5</td></tr><tr><td>Hartmann (6D)</td><td>[0.20, 0.15, 0.48, 0.28, 0.31, 0.66]</td><td>0.1√D</td><td>max</td><td>0.25</td></tr><tr><td>Rosenbrock (6D)</td><td>[1]<sup>D</sup></td><td>0.4096√D</td><td>max</td><td>1.024</td></tr><tr><td>Stybtang (7D)</td><td>[-2.9]<sup>D</sup></td><td>0.8√D</td><td>max</td><td>2</td></tr><tr><td>PD1-WMT</td><td>[0.90, 0.69, 0.02, 0.97]</td><td>0.05√D</td><td>N/A</td><td>0.25</td></tr><tr><td>PD1-CIFAR</td><td>[1, 0.80, 0.0, 0.0]</td><td>0.05√D</td><td>N/A</td><td>0.25</td></tr><tr><td>PD1-LM1B</td><td>[0.91, 0.67, 0.36, 0.85]</td><td>0.05√D</td><td>N/A</td><td>0.25</td></tr></table>
Table 3: $\pi_{x_*}$ for synthetic BO tasks of both prior qualities and PD1.
# C ADDITIONAL EXPERIMENTS
We provide complementary experiments to those introduced in the main paper. Firstly, we display results when ColaBO is used with a prior $\pi_{f_*}$ over the optimal value in Sec. C.2. In Sec. C.3, we demonstrate ColaBO's extensibility to batch evaluations, seamlessly extending the work of Wilson et al. (2017).
# C.1 SYNTHETIC MATÉRN KERNEL EXPERIMENTS
We evaluate ColaBO and all baselines on the synthetic tasks with a Matérn-5/2 kernel and the good user belief over the optimum. We note that roughly half of all $\pi$BO runs struggle with numerical instability from iteration 60 onwards, which produces stagnation in performance and infrequent gains.
# C.2 MAX-VALUE PRIORS
We evaluate ColaBO with priors over the optimal value $\pi_{f*}$ in Figure 10. For each task, we place a Gaussian prior over the optimal value, centering it exactly at the optimal value. Notably, such a prior

Figure 9: ColaBO on the synthetic tasks with a Matérn kernel. Due to the difficulty of the RFF approximation, ColaBO-LogEI struggles on Hartmann (6D), and ColaBO performance is marginally worse on aggregate.

Figure 10: ColaBO with priors over the optimal value. Terminal performance substantially increases on 3 out of 5 benchmarks (Levy, Hartmann (6D), Stybtang), and is approximately preserved on the final two. ColaBO-MES improves marginally more than ColaBO-LogEI when utilizing a prior $\pi_{f*}$ over the optimal value.
substantially influences the exploration-exploitation trade-off; if the prior suggests that the incumbent has a value close to the optimal one, we are encouraged to exploit, as sample functions that attain values well above the believed optimum in exploratory regions will be discarded. Conversely, we are heavily encouraged to explore if the current best observation holds a value that we believe is far from optimal. On Hartmann (6D), we can see this behavior at play. Initial performance is poorer for ColaBO than for the respective baselines, presumably due to above-average exploration, but terminal performance is better.
# C.3 BATCH EVALUATIONS
We evaluate ColaBO on batch evaluations, utilizing the sequential greedy technique for MC acquisition functions from Wilson et al. (2018). Drop-off from sequential to batch evaluations is not evident from the plots, as ordering between sequential and batch varies with the benchmark. While unpredictable, we speculate that the altered exploration-exploitation trade-off provided by the batched acquisition function is occasionally beneficial in the presence of auxiliary user beliefs $\pi_{x*}$ .
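A toy sketch of the sequential greedy selection used here, reusing the weighted-sample conventions of the earlier snippets (the joint improvement of a batch is the per-sample maximum over its members; this is our illustration of the scheme of Wilson et al. (2018), not their implementation):

```python
import numpy as np

def greedy_batch(F_post, rho_weights, grid, f_best, q=3):
    """F_post: (L, n_grid) weighted posterior samples; rho_weights: (L,) belief
    weights; returns q batch points chosen one at a time."""
    imp = np.maximum(f_best - F_post, 0.0)        # per-sample improvements (Eq. 12)
    chosen, batch_imp = [], np.zeros(F_post.shape[0])
    for _ in range(q):
        # Joint improvement of the current batch plus each candidate, per sample.
        joint = np.maximum(batch_imp[:, None], imp)
        scores = (rho_weights[:, None] * joint).sum(axis=0)
        idx = int(np.argmax(scores))
        chosen.append(grid[idx, 0])
        batch_imp = np.maximum(batch_imp, imp[:, idx])
    return chosen
```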

Figure 11: $q = 1$ (sequential) and $q = 3$ (batch) evaluation on a subset of synthetic functions with well-located priors for ColaBO-LogEI and ColaBO-MES. Performance is plotted against total function evaluations, so sequential and batched variants consume the same overall budget.
ageneralframeworkforuserguidedbayesianoptimization/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb01d4f5f664bab8b41207d9ccc93ff6d2779aa058727e6c69338365c5eaefbd
+size 816054

ageneralframeworkforuserguidedbayesianoptimization/layout.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48851208952f7f38836f4dcd8c56afd9f5c70b58dec78710a5dcaeb0ed7ee1ce
+size 617777

ahierarchicalbayesianmodelforfewshotmetalearning/835d9bdc-b1d0-4584-861e-7d0b76aaea95_content_list.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10cf95b1d4857bcff50cc15b164f45005b253c43f245be26d3c9bec6422fd2af
+size 211644

ahierarchicalbayesianmodelforfewshotmetalearning/835d9bdc-b1d0-4584-861e-7d0b76aaea95_model.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d1b7c5b2da6536d36f7895b4fb46680f897fcaebc6320e33d3743d6934cc307
+size 248539

ahierarchicalbayesianmodelforfewshotmetalearning/835d9bdc-b1d0-4584-861e-7d0b76aaea95_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:119d8cd60f1f393f4ac95496116e44a2ccddaaf1effdfb7220cba980aa3fc2c0
+size 1119865

ahierarchicalbayesianmodelforfewshotmetalearning/full.md ADDED
The diff for this file is too large to render. See raw diff

ahierarchicalbayesianmodelforfewshotmetalearning/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29629dd81d042c0846db86a3304e332cd9bfde69556e2b02af073c3748317868
+size 1565799

ahierarchicalbayesianmodelforfewshotmetalearning/layout.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca604b459dd44ed0a3b168cf532621d393d6b05b196acd9e31931e883973d586
+size 1286864

alightweightmethodfortacklingunknownparticipationstatisticsinfederatedaveraging/95f5339c-dfa7-49a6-b487-698cb4f07243_content_list.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8c21f5e6a8063063ef0272a8acf297ac4a66ed721bed3c261dce303b1a176e8
+size 251059

alightweightmethodfortacklingunknownparticipationstatisticsinfederatedaveraging/95f5339c-dfa7-49a6-b487-698cb4f07243_model.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd090175e38e3faa00e3880cfea740f4814364d5def76718f74d4c069c951664
+size 290174