@inproceedings{Vaswani2017,
author = {Ashish Vaswani and Noam Shazeer and Niki Parmar and Jakob Uszkoreit and Llion Jones and Aidan N. Gomez and {\L}ukasz Kaiser and Illia Polosukhin},
title = {Attention is All You Need},
booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
volume = {30},
pages = {5998--6008},
year = {2017}
}
@article{Chen2021,
author = {Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray and Nick Ryder and Mikhail Pavlov and Alethea Power and {\L}ukasz Kaiser and Mohammad Bavarian and Clemens Winter and Philippe Tillet and Felipe Petroski Such and Dave Cummings and Matthias Plappert and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain and William Saunders and Christopher Hesse and Andrew N. Carr and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
title = {Evaluating Large Language Models Trained on Code},
journal = {arXiv preprint arXiv:2107.03374},
year = {2021}
}
@article{Austin2021,
author = {Jacob Austin and Augustus Odena and Maxwell Nye and Maarten Bosma and Henryk Michalewski and David Dohan and Ellen Jiang and Carrie Cai and Michael Terry and Quoc Le and Charles Sutton},
title = {Program Synthesis with Large Language Models},
journal = {arXiv preprint arXiv:2108.07732},
year = {2021}
}
@inproceedings{Hu2021,
author = {Edward J. Hu and Yelong Shen and Phillip Wallis and Zeyuan Allen-Zhu and Yuanzhi Li and Shean Wang and Lu Wang and Weizhu Chen},
title = {{LoRA}: Low-Rank Adaptation of Large Language Models},
booktitle = {International Conference on Learning Representations (ICLR)},
year = {2022}
}
@inproceedings{Dettmers2023,
author = {Tim Dettmers and Artidoro Pagnoni and Ari Holtzman and Luke Zettlemoyer},
title = {{QLoRA}: Efficient Finetuning of Quantized {LLMs}},
booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
volume = {36},
year = {2023}
}
@article{Roziere2023,
author = {Baptiste Rozi{\`e}re and Jonas Gehring and Fabian Gloeckle and Sten Sootla and Itai Gat and Xiaoqing Ellen Tan and Yossi Adi and Jingyu Liu and Romain Sauvestre and Tal Remez and J{\'e}r{\'e}my Rapin and Artyom Kozhevnikov and Ivan Evtimov and Joanna Bitton and Manish Bhatt and Cristian Canton Ferrer and Aaron Grattafiori and Wenhan Xiong and Alexandre D{\'e}fossez and Jade Copet and Faisal Azhar and Hugo Touvron and Louis Martin and Nicolas Usunier and Thomas Scialom and Gabriel Synnaeve},
title = {{Code Llama}: Open Foundation Models for Code},
journal = {arXiv preprint arXiv:2308.12950},
year = {2023}
}
@article{Li2023,
author = {Raymond Li and Loubna Ben Allal and Yangtian Zi and Niklas Muennighoff and Denis Kocetkov and Chenghao Mou and Marc Marone and Christopher Akiki and Jia Li and Jenny Chim and Qian Liu and Evgenii Zheltonozhskii and Terry Yue Zhuo and Thomas Wang and Olivier Dehaene and Mishig Davaadorj and Joel Lamy-Poirier and Jo{\~a}o Monteiro and Oleh Shliazhko and Nicolas Gontier and Nicholas Meade and Armel Zebaze and Ming-Ho Yee and Logesh Kumar Umapathi and Jian Zhu and Benjamin Lipkin and Muhtasham Oblokulov and Zhiruo Wang and Rudra Murthy and Jason Stillerman and Siva Sankalp Patel and Dmitry Abulkhanov and Marco Zocca and Manan Dey and Zhihan Zhang and Nour Fahmy and Urvashi Bhattacharyya and Wenhao Yu and Swayam Singh and Sasha Luccioni and Paulo Villegas and Maxim Kunakov and Fedor Zhdanov and Manuel Romero and Tony Lee and Nadav Timor and Jennifer Ding and Claire Schlesinger and Hailey Schoelkopf and Jan Ebert and Tri Dao and Mayank Mishra and Alex Gu and Jennifer Robinson and Carolyn Jane Anderson and Brendan Dolan-Gavitt and Danish Contractor and Siva Reddy and Daniel Fried and Dzmitry Bahdanau and Yacine Jernite and Carlos Mu{\~n}oz Ferrandis and Sean Hughes and Thomas Wolf and Arjun Guha and Leandro von Werra and Harm de Vries},
title = {{StarCoder}: May the Source Be with You!},
journal = {arXiv preprint arXiv:2305.06161},
year = {2023}
}
@article{Wang2024,
author = {Binyuan Hui and Jian Yang and Zeyu Cui and Jiaxi Yang and Dayiheng Liu and Lei Zhang and Tianyu Liu and Jiajun Zhang and Bowen Yu and Kai Dang and An Yang and Rui Men and Fei Huang and Xingzhang Ren and Xuancheng Ren and Jingren Zhou and Junyang Lin},
title = {{Qwen2.5-Coder} Technical Report},
journal = {arXiv preprint arXiv:2409.12186},
year = {2024}
}
@inproceedings{Nawrot2022,
author = {Piotr Nawrot and Szymon Tworkowski and Micha{\l} Tyrolski and {\L}ukasz Kaiser and Yuhuai Wu and Christian Szegedy and Henryk Michalewski},
title = {Hierarchical Transformers Are More Efficient Language Models},
booktitle = {Findings of the Association for Computational Linguistics: NAACL},
pages = {1559--1571},
year = {2022}
}
@article{Xue2022,
author = {Linting Xue and Aditya Barua and Noah Constant and Rami Al-Rfou and Sharan Narang and Mihir Kale and Adam Roberts and Colin Raffel},
title = {{ByT5}: Towards a Token-Free Future with Pre-trained Byte-to-Byte Models},
journal = {Transactions of the Association for Computational Linguistics (TACL)},
volume = {10},
pages = {291--306},
year = {2022}
}
@article{Shazeer2020,
author = {Noam Shazeer},
title = {{GLU} Variants Improve Transformer},
journal = {arXiv preprint arXiv:2002.05202},
year = {2020}
}
@article{Su2024,
author = {Jianlin Su and Yu Lu and Shengfeng Pan and Ahmed Murtadha and Bo Wen and Yunfeng Liu},
title = {{RoFormer}: Enhanced Transformer with Rotary Position Embedding},
journal = {Neurocomputing},
volume = {568},
pages = {127063},
year = {2024}
}
@article{Dettmers2024,
author = {Daniel Han and Michael Han},
title = {{Unsloth}: Efficient and Memory-Optimized Fine-Tuning},
journal = {GitHub Repository},
year = {2024}
}
@inproceedings{Lin2017,
author = {Tsung-Yi Lin and Piotr Doll{\'a}r and Ross Girshick and Kaiming He and Bharath Hariharan and Serge Belongie},
title = {Feature Pyramid Networks for Object Detection},
booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {2117--2125},
year = {2017}
}
@inproceedings{Ronneberger2015,
author = {Olaf Ronneberger and Philipp Fischer and Thomas Brox},
title = {{U-Net}: Convolutional Networks for Biomedical Image Segmentation},
booktitle = {Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
pages = {234--241},
publisher = {Springer},
year = {2015}
}
@article{Touvron2023,
author = {Hugo Touvron and Thibaut Lavril and Gautier Izacard and Xavier Martinet and Marie-Anne Lachaux and Timoth{\'e}e Lacroix and Baptiste Rozi{\`e}re and Naman Goyal and Eric Hambro and Faisal Azhar and Aurelien Rodriguez and Armand Joulin and Edouard Grave and Guillaume Lample},
title = {{LLaMA}: Open and Efficient Foundation Language Models},
journal = {arXiv preprint arXiv:2302.13971},
year = {2023}
}
@inproceedings{Penedo2024,
author = {Guilherme Penedo and Quentin Malartic and Daniel Hesslow and Ruxandra Cojocaru and Alessandro Cappelli and Hamza Alobeidli and Baptiste Pannier and Ebtesam Almazrouei and Julien Launay},
title = {The RefinedWeb Dataset for {Falcon} {LLM}: Outperforming Curated Corpora with Web Data Only},
booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
volume = {36},
year = {2023}
}
@inproceedings{Wei2022,
author = {Jason Wei and Maarten Bosma and Vincent Y. Zhao and Kelvin Guu and Adams Wei Yu and Brian Lester and Nan Du and Andrew M. Dai and Quoc V. Le},
title = {Finetuned Language Models Are Zero-Shot Learners},
booktitle = {International Conference on Learning Representations (ICLR)},
year = {2022}
}
@article{Taori2023,
author = {Rohan Taori and Ishaan Gulrajani and Tianyi Zhang and Yann Dubois and Xuechen Li and Carlos Guestrin and Percy Liang and Tatsunori B. Hashimoto},
title = {{Stanford Alpaca}: An Instruction-Following {LLaMA} Model},
journal = {GitHub Repository},
year = {2023}
}
@article{Luo2023,
author = {Yuxiang Wei and Zhe Wang and Jiawei Liu and Yifeng Ding and Lingming Zhang},
title = {{Magicoder}: Source Code Is All You Need},
journal = {arXiv preprint arXiv:2312.02120},
year = {2023}
}
@article{Gunasekar2023,
author = {Suriya Gunasekar and Yi Zhang and Jyoti Aneja and others},
title = {Textbooks Are All You Need},
journal = {arXiv preprint arXiv:2306.11644},
year = {2023}
}
@article{Gu2024,
author = {Albert Gu and Tri Dao},
title = {{Mamba}: Linear-Time Sequence Modeling with Selective State Spaces},
journal = {arXiv preprint arXiv:2312.00752},
year = {2024}
}
@article{Gerganov2023,
author = {Georgi Gerganov},
title = {{llama.cpp}: Portable LLM Inference in {C/C++}},
journal = {GitHub Repository},
year = {2023}
}
@article{minbpe,
author = {Andrej Karpathy},
title = {minbpe: Minimal Byte Pair Encoding Tokenizer},
journal = {GitHub Repository},
year = {2024}
}
@inproceedings{Dao2023,
author = {Tri Dao and Daniel Y. Fu and Stefano Ermon and Atri Rudra and Christopher R{\'e}},
title = {{FlashAttention}: Fast and Memory-Efficient Exact Attention with {IO}-Awareness},
booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
volume = {35},
pages = {16344--16359},
year = {2022}
}
@inproceedings{Loshchilov2019,
author = {Ilya Loshchilov and Frank Hutter},
title = {{Decoupled Weight Decay Regularization}},
booktitle = {International Conference on Learning Representations (ICLR)},
year = {2019}
}
@article{Radford2019,
author = {Alec Radford and Jeff Wu and Rewon Child and David Luan and Dario Amodei and Ilya Sutskever},
title = {Language Models Are Unsupervised Multitask Learners},
journal = {OpenAI Blog},
year = {2019}
}
@inproceedings{Brown2020,
author = {Tom B. Brown and Benjamin Mann and Nick Ryder and Melanie Subbiah and Jared Kaplan and Prafulla Dhariwal and Arvind Neelakantan and Pranav Shyam and Girish Sastry and Amanda Askell and others},
title = {Language Models Are Few-Shot Learners},
booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
volume = {33},
pages = {1877--1901},
year = {2020}
}
@inproceedings{Kingma2015,
author = {Diederik P. Kingma and Jimmy Ba},
title = {{Adam}: A Method for Stochastic Optimization},
booktitle = {International Conference on Learning Representations (ICLR)},
year = {2015}
}
@inproceedings{Devlin2019,
author = {Jacob Devlin and Ming-Wei Chang and Kenton Lee and Kristina Toutanova},
title = {{BERT}: Pre-Training of Deep Bidirectional Transformers for Language Understanding},
booktitle = {Proceedings of NAACL-HLT},
pages = {4171--4186},
year = {2019}
}
@article{Kocetkov2023,
author = {Denis Kocetkov and Raymond Li and Loubna Ben Allal and Jia Li and Chenghao Mou and Carlos Mu{\~n}oz Ferrandis and Yacine Jernite and Margaret Mitchell and Sean Hughes and Thomas Wolf and Dzmitry Bahdanau and Leandro von Werra and Harm de Vries},
title = {The Stack: 3 {TB} of Permissively Licensed Source Code},
journal = {arXiv preprint arXiv:2211.15533},
year = {2023}
}
@inproceedings{Holtzman2020,
author = {Ari Holtzman and Jan Buys and Li Du and Maxwell Forbes and Yejin Choi},
title = {The Curious Case of Neural Text Degeneration},
booktitle = {International Conference on Learning Representations (ICLR)},
year = {2020}
}
@inproceedings{Press2022,
author = {Ofir Press and Noah A. Smith and Mike Lewis},
title = {Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation},
booktitle = {International Conference on Learning Representations (ICLR)},
year = {2022}
}
@inproceedings{Zhang2023,
author = {Biao Zhang and Rico Sennrich},
title = {{Root Mean Square Layer Normalization}},
booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
volume = {32},
year = {2019}
}
@article{Wortsman2023,
author = {Mitchell Wortsman and Peter J. Liu and Lechao Xiao and Katie Everett and others},
title = {Small-Scale Proxies for Large-Scale Transformer Training Instabilities},
journal = {arXiv preprint arXiv:2309.14322},
year = {2023}
}
@article{Hu2023,
author = {Zhiqiang Hu and Yihuai Lan and Lei Wang and Wanyu Xu and Ee-Peng Lim and Roy Ka-Wei Lee and Lidong Bing and Soujanya Poria},
title = {{LLM-Adapters}: An Adapter Family for Parameter-Efficient Fine-Tuning of Large Language Models},
journal = {arXiv preprint arXiv:2304.01933},
year = {2023}
}
@inproceedings{Lian2024,
author = {Woosuk Kwon and Zhuohan Li and Siyuan Zhuang and Ying Sheng and Lianmin Zheng and Cody Hao Yu and Joseph E. Gonzalez and Hao Zhang and Ion Stoica},
title = {Efficient Memory Management for Large Language Model Serving with {{PagedAttention}}},
booktitle = {Proceedings of the 29th Symposium on Operating Systems Principles (SOSP)},
pages = {611--626},
year = {2023}
}
@article{Ren2020,
author = {Shuo Ren and Daya Guo and Shuai Lu and Long Zhou and Shujie Liu and Duyu Tang and Neel Sundaresan and Ming Zhou and Ambrosio Blanco and Shuai Ma},
title = {{CodeBLEU}: a Method for Automatic Evaluation of Code Synthesis},
journal = {arXiv preprint arXiv:2009.10297},
year = {2020}
}
@misc{OmniCoder2025,
author = {Tesslate},
title = {{OmniCoder-9B}: A Frontier Open Coding Agent},
year = {2025},
url = {https://huggingface.co/Tesslate/OmniCoder-9B}
}
@inproceedings{yu2023megabyte,
author = {Lili Yu and D{\'a}niel Simig and Colin Flaherty and Armen Aghajanyan and Luke Zettlemoyer and Mike Lewis},
title = {{MEGABYTE}: Predicting Million-byte Sequences with Multiscale Transformers},
booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
year = {2023}
}
@inproceedings{haviv2025multiscale,
author = {Adi Haviv and others},
title = {A Hierarchical Architecture for Causal Million-Length Sequence Modeling},
booktitle = {International Conference on Machine Learning (ICML)},
year = {2025}
}
@inproceedings{xue2022byt5,
author = {Linting Xue and Aditya Barua and Noah Constant and Rami Al-Rfou and Sharan Narang and Mihir Kale and Adam Roberts and Colin Raffel},
title = {{ByT5}: Towards a Token-free Future with Pre-trained Byte-to-byte Models},
journal = {Transactions of the Association for Computational Linguistics},
volume = {10},
pages = {291--306},
year = {2022}
}
@misc{liu2025evabyte,
author = {Tianyu Liu and others},
title = {{EvaByte}: Efficient Byte-level Language Models at Scale},
year = {2025},
note = {University of Hong Kong}
}
@article{meta2024blt,
author = {Artidoro Pagnoni and others},
title = {Byte Latent Transformer: Patches Scale Better Than Tokens},
journal = {arXiv preprint arXiv:2412.09871},
year = {2024}
}
@misc{jin2025namo,
author = {Lucas Jin},
title = {{Namo-R1}: A CPU Realtime VLM in 500M},
year = {2025},
url = {https://github.com/lucasjinreal/Namo-R1}
}
@misc{huggingface2024smollm,
author = {{HuggingFace}},
title = {{SmolLM}: Small Language Models},
year = {2024},
url = {https://huggingface.co/HuggingFaceTB/SmolLM-1.7B}
}
@inproceedings{chen2024stacking,
author = {Zixiang Chen and others},
title = {Stacking Your Transformers: A Closer Look at Model Growth for Efficient {LLM} Pre-Training},
booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
year = {2024}
}
@misc{lv2024preparing,
author = {Kai Lv and others},
title = {Preparing Lessons for Progressive Training of Transformers},
year = {2024}
}
@misc{huggingface2023agents,
author = {{HuggingFace}},
title = {Transformers Agents},
year = {2023},
url = {https://huggingface.co/docs/transformers/agents}
}