---
license: apache-2.0
language:
- en
---
|
|
|
|
|
`Socratic` is a UserGPT, a user simulator fine-tuned from the LLaMA-1-7B base model that generates human-style queries to drive multi-round dialogues (see the PlatoLM paper below).
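As a rough usage sketch, the model can be loaded like any causal LM with Hugging Face `transformers`. Note that the repository ID and the dialogue prompt format below are assumptions for illustration, not taken from this card:

```python
# Minimal usage sketch with Hugging Face transformers.
# NOTE: the repository ID below is a placeholder assumption;
# replace it with this model card's actual repo path.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "FreedomIntelligence/Socratic-7B"  # hypothetical repo ID
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Socratic plays the *user* role: given the dialogue so far, it
# produces the next human-style query. The role tags below are an
# assumed format, not necessarily the one used in training.
prompt = "Assistant: Hello! How can I help you today?\nHuman:"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```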
|
|
|
|
|
GitHub: https://github.com/FreedomIntelligence/PlatoLM
|
|
|
|
|
Paper: https://arxiv.org/abs/2308.11534v6
|
|
|
|
|
---

Citation:
|
|
```
@inproceedings{kong2024platolm,
  title     = {PlatoLM: Teaching LLMs in Multi-Round Dialogue via a User Simulator},
  author    = {Kong, Chuyi and Fan, Yaxin and Wan, Xiang and Jiang, Feng and Wang, Benyou},
  booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  pages     = {7841--7863},
  year      = {2024}
}
```