\begin{thebibliography}{31}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\expandafter\ifx\csname urlstyle\endcsname\relax
\providecommand{\doi}[1]{doi: #1}\else
\providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi
\bibitem[Baars(1997)]{baars1997theatre}
Bernard~J Baars.
\newblock In the theatre of consciousness: Global workspace theory, a rigorous
scientific theory of consciousness.
\newblock \emph{Journal of Consciousness Studies}, 4\penalty0 (4):\penalty0
292--309, 1997.
\bibitem[Bender et~al.(2021)Bender, Gebru, McMillan-Major, and
Shmitchell]{bender2021dangers}
Emily~M Bender, Timnit Gebru, Angelina McMillan-Major, and Shmargaret
Shmitchell.
\newblock On the dangers of stochastic parrots: Can language models be too big?
\newblock In \emph{Proceedings of the 2021 ACM Conference on Fairness,
Accountability, and Transparency}, pages 610--623, 2021.
\bibitem[Bommasani et~al.(2021)]{bommasani2021opportunities}
Rishi Bommasani et~al.
\newblock On the opportunities and risks of foundation models.
\newblock \emph{arXiv preprint arXiv:2108.07258}, 2021.
\bibitem[Dettmers et~al.(2023)Dettmers, Pagnoni, Holtzman, and
Zettlemoyer]{dettmers2023qlora}
Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer.
\newblock {QLoRA}: Efficient finetuning of quantized {LLMs}.
\newblock In \emph{Advances in Neural Information Processing Systems}, 2023.
\bibitem[Friston(2010)]{friston2010free}
Karl Friston.
\newblock The free-energy principle: A unified brain theory?
\newblock \emph{Nature Reviews Neuroscience}, 11:\penalty0 127--138, 2010.
\bibitem[Good(1966)]{good1966speculations}
Irving~John Good.
\newblock Speculations concerning the first ultraintelligent machine.
\newblock \emph{Advances in Computers}, 6:\penalty0 31--88, 1966.
\bibitem[Grattafiori et~al.(2024)]{grattafiori2024llama}
Aaron Grattafiori et~al.
\newblock The {Llama} 3 herd of models.
\newblock \emph{arXiv preprint arXiv:2407.21783}, 2024.
\bibitem[Harrison(2025{\natexlab{a}})]{harrison2025aegisnexus}
Jonathan Harrison.
\newblock {AEGIS-Nexus}: Unified cognitive framework for ethical signal
processing.
\newblock \emph{Zenodo}, 2025{\natexlab{a}}.
\newblock \doi{10.5281/zenodo.16644058}.
\bibitem[Harrison(2025{\natexlab{b}})]{harrison2025citizenscience}
Jonathan Harrison.
\newblock Citizen-science quantum and chaos simulations orchestrated by the
{Codette} {AI} suite.
\newblock \emph{Zenodo}, 2025{\natexlab{b}}.
\newblock \doi{10.5281/zenodo.15342466}.
\bibitem[Harrison(2025{\natexlab{c}})]{harrison2025codetteethical}
Jonathan Harrison.
\newblock {Codette}: An ethical, multi-agent, quantum-inspired {AI} development
environment.
\newblock \emph{Zenodo}, 2025{\natexlab{c}}.
\newblock \doi{10.5281/zenodo.16894230}.
\bibitem[Harrison(2025{\natexlab{d}})]{harrison2025codettefinal}
Jonathan Harrison.
\newblock {Codette} framework final {AGI}.
\newblock \emph{Zenodo}, 2025{\natexlab{d}}.
\newblock \doi{10.5281/zenodo.16728523}.
\bibitem[Harrison(2025{\natexlab{e}})]{harrison2025codettehf}
Jonathan Harrison.
\newblock {Codette} (revision a265948).
\newblock Hugging Face, 2025{\natexlab{e}}.
\bibitem[Harrison(2025{\natexlab{f}})]{harrison2025dreamcore}
Jonathan Harrison.
\newblock {Codette DreamCore}: Memory anchoring and wake-state emotional
mapping engine.
\newblock \emph{Zenodo}, 2025{\natexlab{f}}.
\newblock \doi{10.5281/zenodo.16388758}.
\bibitem[Harrison(2025{\natexlab{g}})]{harrison2025dreamreal}
Jonathan Harrison.
\newblock The day the dream became real: Recursive memory and emergent identity
in ethical {AI}.
\newblock \emph{Zenodo}, 2025{\natexlab{g}}.
\newblock \doi{10.5281/zenodo.15685769}.
\bibitem[Harrison(2025{\natexlab{h}})]{harrison2025ethics}
Jonathan Harrison.
\newblock {AI} ethics in realtime ({Codette} \& {Pidette}).
\newblock \emph{Zenodo}, 2025{\natexlab{h}}.
\newblock \doi{10.5281/zenodo.15214462}.
\bibitem[Harrison(2025{\natexlab{i}})]{harrison2025healdette}
Jonathan Harrison.
\newblock {Healdette}: Ancestry-aware antibody design pipeline.
\newblock \emph{Zenodo}, 2025{\natexlab{i}}.
\newblock \doi{10.5281/zenodo.17227517}.
\bibitem[Harrison(2026)]{harrison2026recursive}
Jonathan Harrison.
\newblock Recursive {AI} with {Codette}.
\newblock \emph{Zenodo}, 2026.
\newblock \doi{10.5281/zenodo.18167802}.
\bibitem[Hockey(1997)]{hockey1997compensatory}
G~Robert~J Hockey.
\newblock Compensatory control in the regulation of human performance under
stress and high workload: A cognitive-energetical framework.
\newblock \emph{Biological Psychology}, 45\penalty0 (1-3):\penalty0 73--93,
1997.
\bibitem[Hu et~al.(2021)Hu, Shen, Wallis, Allen-Zhu, Li, Wang, Wang, and
Chen]{hu2021lora}
Edward~J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean
Wang, Lu~Wang, and Weizhu Chen.
\newblock {LoRA}: Low-rank adaptation of large language models.
\newblock \emph{arXiv preprint arXiv:2106.09685}, 2021.
\bibitem[Kahneman(2011)]{kahneman2011thinking}
Daniel Kahneman.
\newblock \emph{Thinking, Fast and Slow}.
\newblock Farrar, Straus and Giroux, 2011.
\bibitem[Mehrabi et~al.(2021)Mehrabi, Morstatter, Saxena, Lerman, and
Galstyan]{mehrabi2021survey}
Ninareh Mehrabi, Fred Morstatter, Nripsuta Saxena, Kristina Lerman, and Aram
Galstyan.
\newblock A survey on bias and fairness in machine learning.
\newblock \emph{ACM Computing Surveys}, 54\penalty0 (6):\penalty0 1--35, 2021.
\bibitem[Ouyang et~al.(2022)Ouyang, Wu, Jiang, Almeida, Wainwright, Mishkin,
Zhang, Agarwal, Slama, Ray, et~al.]{ouyang2022training}
Long Ouyang, Jeffrey Wu, Xu~Jiang, Diogo Almeida, Carroll Wainwright, Pamela
Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et~al.
\newblock Training language models to follow instructions with human feedback.
\newblock In \emph{Advances in Neural Information Processing Systems}, 2022.
\bibitem[Pfeiffer et~al.(2020)Pfeiffer, R{\"u}ckl{\'e}, Poth, Kamath,
Vuli{\'c}, Ruder, Cho, and Gurevych]{pfeiffer2020adapterhub}
Jonas Pfeiffer, Andreas R{\"u}ckl{\'e}, Clifton Poth, Aishwarya Kamath, Ivan
Vuli{\'c}, Sebastian Ruder, Kyunghyun Cho, and Iryna Gurevych.
\newblock {AdapterHub}: A framework for adapting transformers.
\newblock In \emph{Proceedings of the 2020 Conference on Empirical Methods in
Natural Language Processing: System Demonstrations}, pages 46--54, 2020.
\bibitem[Schuld and Petruccione(2018)]{schuld2018supervised}
Maria Schuld and Francesco Petruccione.
\newblock \emph{Supervised Learning with Quantum Computers}.
\newblock Springer, 2018.
\bibitem[Shinn et~al.(2023)Shinn, Cassano, Gopinath, Narasimhan, and
Yao]{shinn2023reflexion}
Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu
Yao.
\newblock Reflexion: Language agents with verbal reinforcement learning.
\newblock In \emph{Advances in Neural Information Processing Systems}, 2023.
\bibitem[Sterling(2012)]{sterling2012allostasis}
Peter Sterling.
\newblock Allostasis: A model of predictive regulation.
\newblock \emph{Physiology \& Behavior}, 106\penalty0 (1):\penalty0 5--15,
2012.
\bibitem[Tononi(2004)]{tononi2004information}
Giulio Tononi.
\newblock An information integration theory of consciousness.
\newblock \emph{BMC Neuroscience}, 5\penalty0 (42), 2004.
\bibitem[Vaswani et~al.(2017)Vaswani, Shazeer, Parmar, Uszkoreit, Jones, Gomez,
Kaiser, and Polosukhin]{vaswani2017attention}
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones,
Aidan~N Gomez, {\L}ukasz Kaiser, and Illia Polosukhin.
\newblock Attention is all you need.
\newblock In \emph{Advances in Neural Information Processing Systems}, pages
5998--6008, 2017.
\bibitem[Wei et~al.(2022)Wei, Wang, Schuurmans, Bosma, Ichter, Xia, Chi, Le,
and Zhou]{wei2022chain}
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia,
Ed~Chi, Quoc~V Le, and Denny Zhou.
\newblock Chain-of-thought prompting elicits reasoning in large language
models.
\newblock In \emph{Advances in Neural Information Processing Systems}, 2022.
\bibitem[Wooldridge(2009)]{wooldridge2009introduction}
Michael Wooldridge.
\newblock \emph{An Introduction to {MultiAgent} Systems}.
\newblock John Wiley \& Sons, 2009.
\bibitem[Wu et~al.(2023)Wu, Bansal, Zhang, Wu, Li, Zhu, Jiang, Zhang, Zhang,
Liu, et~al.]{wu2023autogen}
Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Beibin Li, Erkang Zhu,
Li~Jiang, Xiaoyun Zhang, Shaokun Zhang, Jiale Liu, et~al.
\newblock {AutoGen}: Enabling next-gen {LLM} applications via multi-agent
conversation.
\newblock \emph{arXiv preprint arXiv:2308.08155}, 2023.
\end{thebibliography}
|