| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:45:50.006917Z" |
| }, |
| "title": "A Sequence Modelling Approach to Question Answering in Text-Based Games", |
| "authors": [ |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Furman", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cape Town", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Edan", |
| "middle": [], |
| "last": "Toledo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cape Town", |
| "location": {} |
| }, |
| "email": "e.toledo@instadeep.com" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Shock", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cape Town", |
| "location": { |
| "addrLine": "3 InstaDeep 4 INRS", |
| "settlement": "Montreal", |
| "country": "Canada" |
| } |
| }, |
| "email": "jonathan.shock@uct.ac.za" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Buys", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cape Town", |
| "location": {} |
| }, |
| "email": "jbuys@cs.uct.ac.za" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Interactive Question Answering (IQA) requires an intelligent agent to interact with a dynamic environment in order to gather information necessary to answer a question. IQA tasks have been proposed as means of training systems to develop language or visual comprehension abilities. To this end, the Question Answering with Interactive Text (QAit) task was created to produce and benchmark interactive agents capable of seeking information and answering questions in unseen environments. While prior work has exclusively focused on IQA as a reinforcement learning problem, such methods suffer from low sample efficiency and poor accuracy in zero-shot evaluation. In this paper, we propose the use of the recently proposed Decision Transformer architecture to provide improvements upon prior baselines. By utilising a causally masked GPT-2 Transformer for command generation and a BERT model for question answer prediction, we show that the Decision Transformer achieves performance greater than or equal to current state-of-the-art RL baselines on the QAit task in a sample efficient manner. In addition, these results are achievable by training on sub-optimal random trajectories, therefore not requiring the use of online agents to gather data.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Interactive Question Answering (IQA) requires an intelligent agent to interact with a dynamic environment in order to gather information necessary to answer a question. IQA tasks have been proposed as means of training systems to develop language or visual comprehension abilities. To this end, the Question Answering with Interactive Text (QAit) task was created to produce and benchmark interactive agents capable of seeking information and answering questions in unseen environments. While prior work has exclusively focused on IQA as a reinforcement learning problem, such methods suffer from low sample efficiency and poor accuracy in zero-shot evaluation. In this paper, we propose the use of the recently proposed Decision Transformer architecture to provide improvements upon prior baselines. By utilising a causally masked GPT-2 Transformer for command generation and a BERT model for question answer prediction, we show that the Decision Transformer achieves performance greater than or equal to current state-of-the-art RL baselines on the QAit task in a sample efficient manner. In addition, these results are achievable by training on sub-optimal random trajectories, therefore not requiring the use of online agents to gather data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Traditional methods for question answering (QA) and machine reading comprehension (MRC) are primarily concerned with the retrieval of declarative knowledge, that is, explicitly stated or static descriptions of entities in text documents or within a knowledge base (KB) (Trischler et al., 2017) . These models tend to answer questions primarily through basic pattern matching skills, further differentiating their abilities from those of humans. Conversely, procedural knowledge is the sequence of actions required to perform a task (Georgeff and Lansky, 1986) . To this end, interactive question answering (IQA) has been proposed as a framework for teaching MRC systems to gather the information necessary for question answering (Yuan et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 269, |
| "end": 293, |
| "text": "(Trischler et al., 2017)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 532, |
| "end": 559, |
| "text": "(Georgeff and Lansky, 1986)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 729, |
| "end": 748, |
| "text": "(Yuan et al., 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "IQA requires an agent to interact with some dynamic environment in order to gather the required knowledge to answer a question (Gordon et al., 2018) . As such, the task is well-suited to be approached as a reinforcement learning (RL) problem. Yuan et al. (2019) proposed Question Answering using interactive text (QAit) as a means of testing the knowledge gathering capabilities of an agent required to answer a question about its environment. Here an agent interacts with a partially observable text-based environment, created using Microsoft TextWorld (C\u00f4t\u00e9 et al., 2018) , in order to gather information and answer questions about the attributes, location, and existence of objects. The QAit task thus aims to benchmark generalisation and provides an environment to train agents capable of gathering information and answering questions.", |
| "cite_spans": [ |
| { |
| "start": 127, |
| "end": 148, |
| "text": "(Gordon et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 243, |
| "end": 261, |
| "text": "Yuan et al. (2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 554, |
| "end": 573, |
| "text": "(C\u00f4t\u00e9 et al., 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Yuan et al.'s proposed baselines (using DQN (Mnih et al., 2015) , DDQN (Van Hasselt et al., 2016) , and Rainbow (Hessel et al., 2018) ) all suffered from low sample efficiency and relatively poor performance on all three question types (location, attribute, and existence). These shortcomings suggest that alternative architectures and methodologies are required to improve performance within the QAit setting.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 63, |
| "text": "(Mnih et al., 2015)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 66, |
| "end": 97, |
| "text": "DDQN (Van Hasselt et al., 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 112, |
| "end": 133, |
| "text": "(Hessel et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Transformers (Vaswani et al., 2017) have shown success in modelling a diverse range of highdimensional problems (Brown et al., 2020; Ramesh et al., 2021; Devlin et al., 2019) . Additionally, existing language models such as BERT (Bidirectional Encoder Representations from Transformers) and GPT (Generative Pre-Trained) (Radford et al., 2019) have been utilised to reduce the size of datasets required for training downstream language tasks (Lee and Hsiang, 2019; Mager et al., 2020) . These benefits coupled with the demon-strated ability of Transformers to model long sequences by utilising the self-attention mechanism makes this architecture ideal for IQA. Recent work (Chen et al., 2021; Janner et al., 2021) have shown the applicability of Transformers to sequential decision making problems as an alternative solution to RL problems. These approaches frame RL trajectories as sequences of states, actions, and rewards modelled autoregressively by a Transformer. This sequence modelling approach is referred to as the Decision Transformer (DT) (Chen et al., 2021) .", |
| "cite_spans": [ |
| { |
| "start": 13, |
| "end": 35, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 112, |
| "end": 132, |
| "text": "(Brown et al., 2020;", |
| "ref_id": null |
| }, |
| { |
| "start": 133, |
| "end": 153, |
| "text": "Ramesh et al., 2021;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 154, |
| "end": 174, |
| "text": "Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 320, |
| "end": 342, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 441, |
| "end": 463, |
| "text": "(Lee and Hsiang, 2019;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 464, |
| "end": 483, |
| "text": "Mager et al., 2020)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 673, |
| "end": 692, |
| "text": "(Chen et al., 2021;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 693, |
| "end": 713, |
| "text": "Janner et al., 2021)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1050, |
| "end": 1069, |
| "text": "(Chen et al., 2021)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper we apply the Decision Transformer to QAit, replacing the online interaction and training methodology of RL approaches with a Decision Transformer that utilises the GPT-2 (Radford et al., 2019) architecture, closely following the methodology outlined by (Chen et al., 2021) . We propose an additional QA module that is a fine-tuned BERT model, with the aim of leveraging pre-trained language models to provide more accurate answers to questions. We show that by framing the QAit task as a sequence modelling problem, a Decision Transformer matches or exceeds the performance of previous RL-based benchmarks when trained on random episodic rollouts, while using significantly less data. Our main contributions are as follows:", |
| "cite_spans": [ |
| { |
| "start": 184, |
| "end": 206, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 267, |
| "end": 286, |
| "text": "(Chen et al., 2021)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1. We show that an offline reinforcement learning method is able to match the performance of online value-based reinforcement learning baselines in the QAit environment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2. We show that by framing IQA as a sequence modelling problem, the performance of the QAit baselines can be matched using significantly less training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "3. We show that the Decision Transformer architecture is able to learn policies comparable to those of online reinforcement learning methods from purely random data, illustrating the architecture's ability to find structure in inherently noisy data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "QAit is implemented in TextWorld 1 (C\u00f4t\u00e9 et al., 2018) , an open-source simulator for training reinforcement learning (RL) agents for decision making and language comprehension. QAit text-based environments are generated procedurally via sampling from a distribution of world settings. There are two environment map types: A fixed map contains six rooms, whereas random maps sample their number of rooms from a uniform distribution U (2, 12). QAit requires an agent to answer questions about the location, existence and attributes of objects in an environment. An agent interacts with a QAit environment using text commands that consist of an action, modifier, and object triplet, e.g., \"open black oven\". A generated environment consists of rooms each containing randomly assigned objects and location names. The agent moves around in the environment for a pre-defined number of time steps or until the predicted command action is \"wait\". TextWorld responds to agent commands with a state string containing information about the room the agent is in and the objects present.", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 54, |
| "text": "(C\u00f4t\u00e9 et al., 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "QAit", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "An agent is required to answer one of three question types:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question types", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "\u2022 Location questions assess an agent's ability to navigate the environment to find the location of an object. For example, \"Where is copper key?\" could be answered with \"garden\" or \"toolbox\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question types", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "\u2022 Existence questions requires the agent to navigate and interact with the environment to gather knowledge and determine whether an object exists. Questions are phrased as \"is there any X in the world?\", where X is an entity in the vocabulary, and answers are either yes (\"1\") or no (\"0\").", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question types", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "\u2022 Attribute questions require that the agent interacts with an object to determine whether it has a particular characteristic or quality. For such question types, the level of interaction and movement required in observing a sufficient amount of information to answer a question greatly exceeds location and existence questions. Answers are also either yes or no. For example, \"Is stove hot?\" requires an agent to find and interact with \"stove\" to answer the question correctly. Comprehension of both the question and the environment are required. Entities often have arbitrary names and attributes, making memorisation impossible. (Chen et al., 2021) architecture as adapted to QAit. States S t and commands A t have their token sequences encoded with GRUs. An embedding is also learnt for the returns-to-go R t . Each of these three embeddings (S t , A t , R t ) are concatenated with a positional embedding for time step t, and fed into a GPT-2 causal Transformer. Commands are predicted autoregressively through linear decoders for the next command's action a t+1 , modifier m t+1 , and object o t+1 components. A fourth decoder predicts the answer to the question at each time step. Yuan et al. (2019) proposed two reward types: Sufficient Information: Sufficient information is a metric used to evaluate the amount of information gathered by the agent and whether or not the information was sufficient to answer the question (Yuan et al., 2019) . It is also used as part of the reward function. The sufficient information score is calculated when the agent decides to stop the interaction and answer the question. For each question type, the sufficient information score is calculated as follows:", |
| "cite_spans": [ |
| { |
| "start": 632, |
| "end": 651, |
| "text": "(Chen et al., 2021)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1188, |
| "end": 1206, |
| "text": "Yuan et al. (2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 1431, |
| "end": 1450, |
| "text": "(Yuan et al., 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question types", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "\u2022 Location: A score of 1 is given if, when the agent decides to stop the interaction, the entity mentioned in the question is present in the final observation. This indicates the agent has witnessed the information it needs to answer the question successfully. If the mentioned entity is not present in the final observation then a score of 0 is given.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rewards", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "\u2022 Existence: If the true answer to the question is yes then a score of 1 is given if the entity mentioned in the question is present in the final observation. If the true answer to the question is no, then a score between 0 and 1 is given proportional to the amount of exploration coverage of the environment the agent has performed. Intuitively this can be seen as a confidence score -if the agent witnesses the entity, it is 100% confident of its existence; otherwise, until it explores the entire environment, it cannot be completely confident.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rewards", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "\u2022 Attribute: Attribute questions have a set of heuristics defined to verify each attribute and assign a score of sufficient information. Each attribute has specific commands that need to be executed for sufficient information to be gathered. This also depends on the agent being in certain states for these outcomes to be observed correctly, e.g. an agent needs to be holding an object to try to eat the object.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rewards", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "Exploration Reward: The agent is also given an exploration reward (Yuan et al., 2018) whenever entering a previously unseen state in order to promote exploration of the environment.", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 85, |
| "text": "(Yuan et al., 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rewards", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "(Yuan et al., 2019) trained agents on multiple Number of Games settings, i.e., number of unique environments that an agent interacts with during train-ing. In this paper, we restrict our experiments to the 500 games setting when generating offline training data for the Decision Transformer. We measure an agent's performance through both sufficient information score and question answering accuracy. Models are evaluated in a zeroshot evaluation on the QAit test set in order to assess agents' generalisation abilities. Each question type and map type have their own unique set of 500 never-before-seen games, each containing a single question.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "2.1.3" |
| }, |
| { |
| "text": "The Decision Transformer (Chen et al., 2021) architecture approaches reinforcement learning problems by autoregressively modelling a trajectory of actions/commands, states, and rewards. Command triples (action, modifier, noun) are conditioned upon the total reward that can still be gathered from interacting with the environment. This is referred to as the returns-to-go (RTG)", |
| "cite_spans": [ |
| { |
| "start": 25, |
| "end": 44, |
| "text": "(Chen et al., 2021)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decision Transformer", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "R t = T t \u2032 =t r t \u2032", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decision Transformer", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where T is the trajectory length and r t is the reward at time step t. Thus the initial return-to-go R 1 represents the total reward to be gained from a given episode. After every episodic play-through, the trajectory is represented", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decision Transformer", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "as (R 1 , s 1 , a 1 , R 2 , s 2 , a 2 ...R T , s T , a T )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decision Transformer", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": ", where R t is the RTG, s is a state, and a an action/command.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decision Transformer", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "An example QAit trajectory is shown in Figure 2 . The trajectory representation enables training a sequence model such as GPT-2 (Radford et al., 2019) , as command prediction is based on gaining some future reward, rather than on how much reward has already been obtained. During testing the model is conditioned on total desired reward by setting R 1 and the starting state to generate command sequences autoregressively. If an agent obtains some reward while interacting with the world, this is deducted from its RTG in subsequent time steps.", |
| "cite_spans": [ |
| { |
| "start": 129, |
| "end": 151, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 39, |
| "end": 48, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Decision Transformer", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Training a Decision Transformer requires offline training data for supervised learning. Online reinforcement learning, in contrast, sees an agent continually interacting with the environment to gather experience and update its policy based on observed rewards.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We generate offline training data using random rollouts for each map type and question type in Table 1 : Size of each of the training datasets, i.e. number of trajectories generated with random rollouts. The average and maximum total rewards gained per trajectory are also given.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 95, |
| "end": 102, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training data generation with random rollouts", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "the 500 games setting. The rollouts are generated using a random agent which uniformly samples commands from all admissible commands for a particular time step. This restriction stems from the sparsity of the action space (approximately 1654 3 possible commands compared to approximately 8 admissible commands): sampling commands from the complete vocabulary results in mostly invalid commands. Thus, by only using admissible commands in data generation, we intend for the Decision Transformer to learn which command triplets are admissible (as this is unknown during testing). The sequence of commands and observed states are recorded along with the reward for each command. Training dataset statistics are given in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 717, |
| "end": 724, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training data generation with random rollouts", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The maximum trajectory input length of the Decision Transformer is set to K = 50 time steps, which is the maximum length of a QAit episode. This allows the DT to access the entire trajectory for command generation and question answering. Token embeddings for states and command sequences are obtained using a single embedding layer. At each time step the sequence of tokens representing the current state of the environment is encoded with a GRU (Cho et al., 2014) with the final hidden state h n representing the entire encoded environment state. We concatenate the question to the end of each state sequence, separated by a \"<|>\" delimiter token. Commands, which can consist of up to 3 tokens, are similarly encoded with another GRU. Embeddings for returns-to-go R t are also learnt and projected to the embedding dimension. Finally, a positional embedding representing the environment time step t is concatenated to each input (returns, states & commands) after the embedding and GRU layers. The embedded and positionally encoded command, state, and return-to-go inputs are fed into the GPT model. Figure 1 shows the Decision Transformer architecture with example input. At each time step t, the Decision Transformer encoding x t is fed into four linear decoders. Three decoders predict the next command's action, modifier, and object components, while the fourth decoder predicts the answer to the question (this is the same at each time step). Although we also use a separate QA module to predict the final answer, the answer decoder allows the Decision Transformer to learn some primitive level of question answering, thereby allowing the QA loss to help guide command generation. Chen et al. (2021) found that predicting states and returns-to-go at each time step did not improve performance. This motivates exclusively predicting command triples, along with answers to questions.", |
| "cite_spans": [ |
| { |
| "start": 446, |
| "end": 464, |
| "text": "(Cho et al., 2014)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1687, |
| "end": 1705, |
| "text": "Chen et al. (2021)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1101, |
| "end": 1109, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Decision Transformer", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The model is trained to optimize the sum of the cross-entropy losses of action, modifier, object, and answer prediction. For each question and map type configuration, a set of unique validation games were generated wherein an agent must interact with an environment to answer a question. During training, the Decision Transformer is evaluated on the set of hold-out games every 250 iterations to monitor sufficient information scores and to avoid overfitting.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decision Transformer", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The QA module consists of a pretrained BERT encoder with a linear classification layer. The per-time step state sequences are joined into a sin-gle long sequence of tokens with the question appended at the end. A [CLS] token is added to the beginning of the sequence, and a [SEP] token before and after the question. This concatenated sequence is tokenised by a Bert-Base-Uncased tokeniser (Devlin et al., 2019) and padded or fronttruncated (keeping the most recent part of the state sequence) to return a 512 token sequence which is then fed to the BERT encoder. We subsequently pass BERT's output vector corresponding to the CLS token to a linear layer. For attribute and existence questions this model performs binary classification (to predict \"yes\" or \"no\"), while for location questions it produces a softmax over the vocabulary. We use cross-entropy to calculate the loss between predicted and correct answers.", |
| "cite_spans": [ |
| { |
| "start": 390, |
| "end": 411, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "QA module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Training and validation sets that consist entirely of valid trajectories are created for the QA module. We use the validation set (20% of the generated trajectories) to save the model with the highest validation QA accuracy (after 30 training epochs). In order to simulate more realistic QA training data, we feed the QA training examples back into the trained DT and use it to predict where to cut off the generated sequence, as during testing there is no guarantee that the DT would have explored up until the correct answer has been found. The sequence is cut off when the DT predicts the stop action (wait) or the time step limit is exceeded.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "QA module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "To generate the command sequence from the DT during testing, instead of greedy decoding we sample each next command from the probability distri- butions over the action, modifier, and object. This motivation is similar to that of stochastic decoding algorithms in natural language generation: The stochasticity minimises the risk of the Decision Transformer entering a loop in which the same command is generated repeatedly, and more closely mirrors natural language which avoids utterances with too high probability (Zarrie\u00df et al., 2021). In the case of answer prediction, we deterministically take the argmax of the output.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "A shortcoming in the DT's methodology is the expectation for the environment's maximum achievable return to be known a priori. Using an inappropriate value can greatly hamper performance, resulting in premature halting or needlessly excessive exploration. Thus, we tune the value of the initial returns-to-go R 1 as a hyperparameter. We create a validation set of 50 games to evaluate the question answering performance of both the DT's answer prediction head and BERT model. For each question and map type we consider R 1 either set as a fixed value or sampled from an exponential distribution. Following the methodology used by Yuan et al. (2019) for selecting the best model during training, we tune R 1 to maximize the sum of the sufficient information score and question answering accuracy. See Appendix A for details.", |
| "cite_spans": [ |
| { |
| "start": 630, |
| "end": 648, |
| "text": "Yuan et al. (2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Returns Tuning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "We evaluate the Decision Transformer both where its own answer prediction head is used for questions answering and where this is done with the BERT QA model. Test set results on the 500 games setting are given in Table 2 , together with with RL model results as reported by Yuan et al. (2019) . We also report results of training the DT on reduced datasets with only 10,000 episodes, in order to further evaluate the sample efficiency of our approach (see section 4.5). Training results are available in the Appendix in Tables 7 and 8. Table 3 gives the BERT QA model's validation accuracy. We discuss the results for each question type.", |
| "cite_spans": [ |
| { |
| "start": 274, |
| "end": 292, |
| "text": "Yuan et al. (2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 213, |
| "end": 220, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 536, |
| "end": 543, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Overall, DT-BERT outperforms the Decision Transformer's answer prediction head in location and attribute type questions, while the DT gives a higher accuracy on existence questions. At a high level, these performance differences depend on the state and action space that the model was required to learn and navigate. Existence type and attribute type questions may depend on long-range dependencies. For example, existence type questions require the ability to know whether an object has been witnessed or not. When the answer is that an object doesn't exist, the problem is more than just word matching within the last few states. We believe that this is why the Decision Transformer QA head outperforms the BERT model on existence and attribute questions since it has access to the entire state trajectory. On questions whose answers were more likely to be found within the last 512 tokens, DT-BERT achieves higher question answering accuracy than the Decision Transformer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The DT's answer prediction head has a lower QA accuracy than previous RL approaches on both fixed and random maps. However its sufficient information scores, reflecting the DT's information gathering capacities, are higher. For location type questions, QA accuracy normally matches suffi- cient information results due to QA modules effectively performing word matching once an agent arrives in the correct state. We see this in the RL methods' results as their sufficient information scores are very close to their QA accuracies. This indicates that the DT's question answering prediction head is underfitting the training data. DT-BERT outperforms the QA accuracy of the RL models on both fixed and random maps. On random maps, QA accuracy is slightly higher than sufficient information, suggesting that in a small number of cases the BERT model may be able to deduce the answer from the context even when it does not explicitly appears in the trajectory. The performance gap between the BERT QA model and the DT means that a question can still be correctly answered even if the DT stops in an incorrect state. The high BERT QA accuracy for location questions can also be seen in Table 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1182, |
| "end": 1189, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Location Questions", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We suggest two reasons for the BERT QA model answering location type questions more accurately than the Decision Transformer's prediction head. First, it is easier for BERT model to learns skills basic pattern matching skills such as identifying entity and location names from state strings. Second, exploration is not as highly encouraged with location questions as with existence and attribute types. Less exploration means fewer states visited, allowing the state context window to contain less noisy state strings than other question types.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Location Questions", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The DT outperforms RL baselines on sufficient information and QA accuracy in the random maps setting for existence questions. However on fixed maps it performs worse than the DQN. The BERT QA model underperforms the DT answer prediction head here in both map types, suggesting that jointly optimising answer and command prediction leads to improved performance on existence type questions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Existence Questions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Reasoning about the existence of an object within a TextWorld environment requires knowledge about the entirety of the world. Therefore, existential questions require an agent to fully explore an environment to answer whether or not an entity exists within it. The Decision Transformer's selfattention mechanism makes performing long-term credit assignments possible. The answer prediction head of the DT can thus draw upon information gathered in all previous states to inform question answering. As a result, the ability to model dependencies that stretch throughout all states encountered allows the DT to outperform the BERT model, whose context window is constrained to 512 tokens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Existence Questions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "None of the models achieve results that are substantially above 50% on attribute questions, confirming the challenge of this question type. The Decision Transformer did obtain higher sufficient information than all RL baselines. DT-BERT obtains higher QA accuracies than the DT answer prediction head; it obtains the highest QA accuracy among all the models on random maps, and performs slightly worse than the DQN on fixed maps.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute Questions", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Despite the Decision Transformer's ability to learn long-term dependencies via its attention mechanism, we posit that the contextualised embeddings of BERT are able to model a richer semantic representation of TextWorld's state-strings than the embeddings learnt by the DT. This better capturing of the semantic space enables BERT to more fully utilise the context with which it was provided by using pre-existing understanding to help answer questions posed in natural language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute Questions", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Based on validation set performance, the optimal initial return-to-go for location type questions was determined to be 2.0 for both fixed and random map settings. This is lower than for existence and attribute types, indicating that exploration is not as highly encouraged. In location type questions, the entity definitively exists somewhere within the environment. This means that the action space required to answer questions of locality is reduced to traversals and basic interactions with containers. Therefore, less exploration is needed as the information to answer a question is more easily ac-quired. Too high an initial reward would promote unnecessary actions with a high likelihood of leading the agent astray from stopping in the correct state.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rewards and Performance", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Existence questions require far more exploration of an environment than location type questions. Higher starting rewards reflect this need for greater exploration and are associated with better QA and sufficient information scores, as seen in Table 5 in the Appendix. These higher values promote a more complete traversal of the world, allowing for gathering information required to answer the question. However, too high an initial reward means that entering a correct state and receiving a reward of 1.0 may not affect the model's decision making. If the DT has a current RTG of 5.0 and enters the correct state that rewards 1.0, the RTG from then onwards is 4.0. The return-to-go of 4.0 does not suggest to the model that it has entered the correct state, meaning it carries on exploring and gathering information. Likewise, too small a reward could prematurely cause an agent to stop exploring due to gaining rewards for entering new states via the exploration bonus. Therefore, we observe that the best RTG values err on the larger side, which encourages greater world exploration.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 243, |
| "end": 250, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Rewards and Performance", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Attribute type questions are considered the most sparsely rewarded of all three types (Yuan et al., 2019) . We therefore expected higher rewards to be associated with better accuracies. The results, however, paint a different picture. In a fixed map, where the state space is, on average, smaller than that of random maps, we see that a smaller reward yields the best score. This reduction is likely a result of the reduced state and action space making too much exploration and interaction with the environment degrade performance. On the other hand, in a random map setting higher rewards yields better QA and sufficient information scores, allowing us to conclude that higher rewards promote more exploration and thus allows the model to better answer the question.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 105, |
| "text": "(Yuan et al., 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rewards and Performance", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The RL agents in QAit were trained for more than 200K episodes. In comparison, most of our Decision Transformers were trained on around 40K episodes ( Table 1) . The test set results therefore show that DT is able to match or outperform the previous RL methods when trained on approximately 25% of the number of episodes. Moreover, all training data used for the DT was generated via random rollouts -indicating that the Decision Transformer has the ability to learn optimal policies from suboptimal data. We also found that fine-tuning a BERT model for QA on the random rollout data works well, as long as the DT is used to determine where to cut off the trajectory.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 151, |
| "end": 159, |
| "text": "Table 1)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sample Efficiency", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "In order to further elucidate the DT's sample efficient learning capabilities, we generated new datasets for all question and map types that only contained 10 thousand episodes. The validation results can be seen Table 6 in the Appendix. These experiments indicate that the DT trained on even fewer offline trajectories can achieve results on par with or better than both previous baselines as well as identical models trained on more data. Here we see fixed map sufficient information scores being improved for all question types and QA accuracy increasing for attribute and existence questions. However, QA accuracy for location type questions is worse than previous baselines in both random and fixed maps (see Table 2 ). While the results are not consistently better, they do further indicate the sample efficiency of the Decision Transformer.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 213, |
| "end": 220, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 714, |
| "end": 721, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sample Efficiency", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "We showed that interactive question answering can be framed as a sequence modelling problem by training Transformers for action generation and answer prediction using random roll-outs. Results show that the Decision Transformer approach matches or outperforms current reinforcement learning approaches for QAit on most question types and maps type configurations in the 500 game setting. Additionally, the approach is more sample efficient than reinforcement learning approaches, reducing the amount of training data required even though the data generated via random rollouts is suboptimal. Fine-tuning a BERT model for question answering on the same generated dataset improves performance over using the Decision Transformer directly for question answering in two of the three question types. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Hyperparameters of the DT and BERT are given in Table 4 . Table 4 : Decision Transformer and the BERT QA hyperparameters. For the DT, Context length K refers to the amount of previous time steps with which the Transformer can conditiona on. Context State context window refers to the number of tokens from the state to be used for prediction. Adam (Kingma and Ba, 2015) is used as optimiser in conjunction with the specified learning rate, linear warmup and cosine weight decay.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 48, |
| "end": 55, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 58, |
| "end": 65, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Hyperparameter Tuning", |
| "sec_num": null |
| }, |
| { |
| "text": "As can be seen in Table 5 , the sufficient information score peaking at R 1 = 2 indicates optimal statespace exploration for location questions when the potential for future reward is moderate for random and fixed map types. While the QA accuracy was highest for both settings when the initial reward was the maximum of the training set, we opted to test the DT's question answering and information gathering capabilities at R 1 = 2 as this yielded the highest combined sufficient information and QA accuracy score.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 18, |
| "end": 25, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A.1.1 Location", |
| "sec_num": null |
| }, |
| { |
| "text": "Using both sufficient information and QA accuracy, the optimal initial reward for fixed map existence questions was determined to be 4.0, with the DT achieving a QA accuracy of 0.660 and a sufficient information score of 0.263 on the validation set. In random map settings, the DT scored a validation accuracy of 0.720 with a corresponding sufficient information score of 0.298, where the initial reward was determined to be the maximum of the training set 3.94.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1.2 Existence", |
| "sec_num": null |
| }, |
| { |
| "text": "The best sufficient information and QA accuracy combinations for the Decision Transformer were achieved at an initial reward of 2.0 for fixed and 5.0 for random map types. On the validation set, the fixed map DT achieved a QA accuracy of 0.533 and a SI score of 0.056. Random map saw a similar SI of 0.057 but worse QA accuracy of 0.460.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1.3 Attribute", |
| "sec_num": null |
| }, |
| { |
| "text": "Based on data gathered using the online-evaluation dataset, the optimal initial return-to-go for location type questions was 2.0. Using the BERT model for QA yielded an accuracy of 0.227 for fixed maps and 0.393 for random maps. The BERT model achieved a higher QA accuracy than sufficient information score during evaluation, indicating that the context window spanning multiple states was a boon to QA accuracy. During training, the BERT model achieved almost perfect scores for questionanswering on the held-out set of offline trajectories, seen in Table 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 552, |
| "end": 559, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A.2.1 Location", |
| "sec_num": null |
| }, |
| { |
| "text": "Using the online-validation set, we determined optimal starting reward values of 3.0 for fixed map and 3.94 for random. These values were associated with a QA accuracy of 0.64 for fixed and 0.647 for random map types. However, scores were significantly lower than the offline validation set used during training, where QA accuracy of 0.778 and 0.779 was achieved for fixed and random maps, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2.2 Existence", |
| "sec_num": null |
| }, |
| { |
| "text": "In the offline validation set, the BERT model scored a QA accuracy of 0.616 for random and 0.780 for fixed map settings. On the online validation set, we observed the maximum combination of QA and sufficient information for the BERT model at an R 1 of 3.0 for fixed and 2.0 for random where the BERT QA model had an accuracy of 0.507 and 0.660 for random and fixed map types, respectively. However, we opted to use the maximum of the train set 4.03 when evaluating on the test set for random map types. This is due to the BERT QA model having a high standard deviation of 0.156 and an average QA accuracy of 0.640, indicating greater potential for high QA accuracy. Moreover, the sufficient information score associated with this accuracy is 0.056 -higher than the random map with an initial reward of 2.0. Transformer's answer-prediction head on the validation set when trained in the 500 games setting with different initial returns-to-go (RTG). See results in Table 5 . ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 963, |
| "end": 970, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A.2.3 Attribute", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.microsoft.com/enus/research/project/textworld/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work is based on research supported in part by the National Research Foundation of South Africa (Grant Number: 129850). Some computations were performed using facilities provided by the University of Cape Town's ICTS High Performance Computing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "6" |
| }, |
| { |
| "text": " Table 6 : Question-answering accuracy of the 10K variation BERT model's and Decision Transformer's answer prediction head as well as the Decision Transformer's average sufficient information (SI) score on validation set at different initial return-to-go (RTG) values. Bold values indicate the combined highest QA and sufficient information score with the associated initial RTG value also bolded. Both models were trained on only 10 thousand episodes of data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1, |
| "end": 8, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Ryder", |
| "suffix": "" |
| }, |
| { |
| "first": "Melanie", |
| "middle": [], |
| "last": "Subbiah", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [ |
| "D" |
| ], |
| "last": "Kaplan", |
| "suffix": "" |
| }, |
| { |
| "first": "Prafulla", |
| "middle": [], |
| "last": "Dhariwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Arvind", |
| "middle": [], |
| "last": "Neelakantan", |
| "suffix": "" |
| }, |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Shyam", |
| "suffix": "" |
| }, |
| { |
| "first": "Girish", |
| "middle": [], |
| "last": "Sastry", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanda", |
| "middle": [], |
| "last": "Askell", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandhini", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Ariel", |
| "middle": [], |
| "last": "Herbert-Voss", |
| "suffix": "" |
| }, |
| { |
| "first": "Gretchen", |
| "middle": [], |
| "last": "Krueger", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Henighan", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Ramesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Ziegler", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Clemens", |
| "middle": [], |
| "last": "Winter", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Hesse", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Sigler", |
| "suffix": "" |
| }, |
| { |
| "first": "Mateusz", |
| "middle": [], |
| "last": "Litwin", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "33", |
| "issue": "", |
| "pages": "1877--1901", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Ma- teusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Ad- vances in Neural Information Processing Systems, volume 33, pages 1877-1901. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Aravind Srinivas, and Igor Mordatch. 2021. Decision transformer: Reinforcement learning via sequence modeling", |
| "authors": [ |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Aravind", |
| "middle": [], |
| "last": "Rajeswaran", |
| "suffix": "" |
| }, |
| { |
| "first": "Kimin", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Grover", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Laskin", |
| "suffix": "" |
| }, |
| { |
| "first": "Pieter", |
| "middle": [], |
| "last": "Abbeel", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Ar- avind Srinivas, and Igor Mordatch. 2021. Decision transformer: Reinforcement learning via sequence modeling. In Advances in Neural Information Pro- cessing Systems.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "On the properties of neural machine translation: Encoder-decoder approaches", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merri\u00ebnboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of SSST-8, Eighth Workshop on Syntax, Semantics and Structure in Statistical Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "103--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart van Merri\u00ebnboer, Dzmitry Bah- danau, and Yoshua Bengio. 2014. On the properties of neural machine translation: Encoder-decoder ap- proaches. In Proceedings of SSST-8, Eighth Work- shop on Syntax, Semantics and Structure in Statistical Translation, pages 103-111.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Textworld: A learning environment for text-based games", |
| "authors": [ |
| { |
| "first": "\u00c1kos", |
| "middle": [], |
| "last": "Marc-Alexandre C\u00f4t\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Xingdi", |
| "middle": [], |
| "last": "K\u00e1d\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tavian", |
| "middle": [], |
| "last": "Kybartas", |
| "suffix": "" |
| }, |
| { |
| "first": "Emery", |
| "middle": [], |
| "last": "Barnes", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Fine", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| }, |
| { |
| "first": "Layla", |
| "middle": [ |
| "El" |
| ], |
| "last": "Hausknecht", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "Asri", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Adada", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Workshop on Computer Games", |
| "volume": "", |
| "issue": "", |
| "pages": "41--75", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc-Alexandre C\u00f4t\u00e9, \u00c1kos K\u00e1d\u00e1r, Xingdi Yuan, Ben Kybartas, Tavian Barnes, Emery Fine, James Moore, Matthew Hausknecht, Layla El Asri, Mahmoud Adada, et al. 2018. Textworld: A learning environ- ment for text-based games. In Workshop on Com- puter Games, pages 41-75. Springer.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Procedural knowledge", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [ |
| "P" |
| ], |
| "last": "Georgeff", |
| "suffix": "" |
| }, |
| { |
| "first": "Amy", |
| "middle": [ |
| "L" |
| ], |
| "last": "Lansky", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Proceedings of the IEEE", |
| "volume": "74", |
| "issue": "10", |
| "pages": "1383--1398", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael P Georgeff and Amy L Lansky. 1986. Pro- cedural knowledge. Proceedings of the IEEE, 74(10):1383-1398.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Iqa: Visual question answering in interactive environments", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gordon", |
| "suffix": "" |
| }, |
| { |
| "first": "Aniruddha", |
| "middle": [], |
| "last": "Kembhavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Rastegari", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Redmon", |
| "suffix": "" |
| }, |
| { |
| "first": "Dieter", |
| "middle": [], |
| "last": "Fox", |
| "suffix": "" |
| }, |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Farhadi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "4089--4098", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Gordon, Aniruddha Kembhavi, Mohammad Rastegari, Joseph Redmon, Dieter Fox, and Ali Farhadi. 2018. Iqa: Visual question answering in interactive environments. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recog- nition, pages 4089-4098.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Rainbow: Combining improvements in deep reinforcement learning", |
| "authors": [ |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Hessel", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Modayil", |
| "suffix": "" |
| }, |
| { |
| "first": "Hado", |
| "middle": [], |
| "last": "Van Hasselt", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Schaul", |
| "suffix": "" |
| }, |
| { |
| "first": "Georg", |
| "middle": [], |
| "last": "Ostrovski", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Dabney", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Horgan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bilal", |
| "middle": [], |
| "last": "Piot", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Azar", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Silver", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-second AAAI conference on artificial intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matteo Hessel, Joseph Modayil, Hado Van Hasselt, Tom Schaul, Georg Ostrovski, Will Dabney, Dan Horgan, Bilal Piot, Mohammad Azar, and David Silver. 2018. Rainbow: Combining improvements in deep rein- forcement learning. In Thirty-second AAAI confer- ence on artificial intelligence.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Offline reinforcement learning as one big sequence modeling problem", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Janner", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiyang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Levine", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Janner, Qiyang Li, and Sergey Levine. 2021. Offline reinforcement learning as one big sequence modeling problem. In Advances in Neural Informa- tion Processing Systems.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "ICLR (Poster)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In ICLR (Poster).", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Patentbert: Patent classification with fine-tuning a pre-trained bert model", |
| "authors": [ |
| { |
| "first": "Jieh-Sheng", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Jieh", |
| "middle": [], |
| "last": "Hsiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.02124" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jieh-Sheng Lee and Jieh Hsiang. 2019. Patentbert: Patent classification with fine-tuning a pre-trained bert model. arXiv preprint arXiv:1906.02124.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "GPT-too: A language-model-first approach for AMR-to-text generation", |
| "authors": [ |
| { |
| "first": "Manuel", |
| "middle": [], |
| "last": "Mager", |
| "suffix": "" |
| }, |
| { |
| "first": "Ram\u00f3n", |
| "middle": [], |
| "last": "Fernandez Astudillo", |
| "suffix": "" |
| }, |
| { |
| "first": "Tahira", |
| "middle": [], |
| "last": "Naseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Md", |
| "middle": [ |
| "Arafat" |
| ], |
| "last": "Sultan", |
| "suffix": "" |
| }, |
| { |
| "first": "Young-Suk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Florian", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1846--1852", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.167" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manuel Mager, Ram\u00f3n Fernandez Astudillo, Tahira Naseem, Md Arafat Sultan, Young-Suk Lee, Radu Florian, and Salim Roukos. 2020. GPT-too: A language-model-first approach for AMR-to-text gen- eration. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 1846-1852, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Human-level control through deep reinforcement learning", |
| "authors": [ |
| { |
| "first": "Volodymyr", |
| "middle": [], |
| "last": "Mnih", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Silver", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [ |
| "A" |
| ], |
| "last": "Rusu", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Veness", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [ |
| "G" |
| ], |
| "last": "Bellemare", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Riedmiller", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [ |
| "K" |
| ], |
| "last": "Fidjeland", |
| "suffix": "" |
| }, |
| { |
| "first": "Georg", |
| "middle": [], |
| "last": "Ostrovski", |
| "suffix": "" |
| }, |
| { |
| "first": "Stig", |
| "middle": [], |
| "last": "Petersen", |
| "suffix": "" |
| }, |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Beattie", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Sadik", |
| "suffix": "" |
| }, |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Antonoglou", |
| "suffix": "" |
| }, |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "King", |
| "suffix": "" |
| }, |
| { |
| "first": "Dharshan", |
| "middle": [], |
| "last": "Kumaran", |
| "suffix": "" |
| }, |
| { |
| "first": "Daan", |
| "middle": [], |
| "last": "Wierstra", |
| "suffix": "" |
| }, |
| { |
| "first": "Shane", |
| "middle": [], |
| "last": "Legg", |
| "suffix": "" |
| }, |
| { |
| "first": "Demis", |
| "middle": [], |
| "last": "Hassabis", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Nature", |
| "volume": "518", |
| "issue": "7540", |
| "pages": "529--533", |
| "other_ids": { |
| "DOI": [ |
| "10.1038/nature14236" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A. Rusu, Joel Veness, Marc G. Bellemare, Alex Graves, Martin Riedmiller, Andreas K. Fidje- land, Georg Ostrovski, Stig Petersen, Charles Beat- tie, Amir Sadik, Ioannis Antonoglou, Helen King, Dharshan Kumaran, Daan Wierstra, Shane Legg, and Demis Hassabis. 2015. Human-level control through deep reinforcement learning. Nature, 518(7540):529- 533.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Zero-shot text-to-image generation", |
| "authors": [ |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Ramesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Mikhail", |
| "middle": [], |
| "last": "Pavlov", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabriel", |
| "middle": [], |
| "last": "Goh", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Gray", |
| "suffix": "" |
| }, |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Voss", |
| "suffix": "" |
| }, |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 38th International Conference on Machine Learning", |
| "volume": "139", |
| "issue": "", |
| "pages": "8821--8831", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. 2021. Zero-shot text-to-image gen- eration. In Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 8821-8831. PMLR.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "NewsQA: A machine comprehension dataset", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Trischler", |
| "suffix": "" |
| }, |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xingdi", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Harris", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Bachman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaheer", |
| "middle": [], |
| "last": "Suleman", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2nd Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "191--200", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-2623" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Trischler, Tong Wang, Xingdi Yuan, Justin Har- ris, Alessandro Sordoni, Philip Bachman, and Kaheer Suleman. 2017. NewsQA: A machine comprehen- sion dataset. In Proceedings of the 2nd Workshop on Representation Learning for NLP, pages 191-200, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Deep reinforcement learning with double q-learning", |
| "authors": [ |
| { |
| "first": "Hado", |
| "middle": [], |
| "last": "Van Hasselt", |
| "suffix": "" |
| }, |
| { |
| "first": "Arthur", |
| "middle": [], |
| "last": "Guez", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Silver", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the AAAI conference on artificial intelligence", |
| "volume": "30", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hado Van Hasselt, Arthur Guez, and David Silver. 2016. Deep reinforcement learning with double q-learning. In Proceedings of the AAAI conference on artificial intelligence, volume 30.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "The Decision Transformer", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": "An example QAit trajectory in the form (r 1 , s 1 , a 1 , r 2 , s 2 , a 2 , . . . , s T , a T ). Each time step consists of the reward r t , state s t and command a t . The question and correct answer are also given, along with the returns-to-go R t .", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "text": "Barplot showing QA accuracy of the BERT QA model on the validation set when trained in the 500 games setting with different initial returns-to-go (RTG). See results inTable 5.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF3": { |
| "text": "Barplot showing QA accuracy of the Decision", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "html": null, |
| "text": ".244) 0.204(0.216) 0.674(0.279) 0.678(0.214) 0.534(0.014) 0.530(0.017) DDQN 0.218(0.228) 0.222(0.246) 0.626(0.213) 0.656(0.188) 0.508(0.026) 0.486(0.023) Rainbow 0.190(0.196) 0.172(0.178) 0.656(0.207) 0.678(0.191) 0.496(0.029) 0.494(0.017)", |
| "num": null, |
| "content": "<table><tr><td>Model</td><td>Fixed</td><td>Location Random</td><td>Existence Fixed Random</td><td>Attribute Fixed Random</td></tr><tr><td colspan=\"5\">DQN 0.224(0DT 0.168(0.232) 0.104(0.264) 0.668(0.254) 0.722(0.277) 0.504(0.057) 0.526(0.058) DT-BERT 0.232(0.232) 0.270(0.264) 0.626(0.258) 0.654(0.277) 0.524(0.058) 0.538(0.060)</td></tr><tr><td colspan=\"5\">DT-10K DT-BERT-10K 0.124(0.302) 0.076(0.204) 0.612(0.241) 0.676(0.223) 0.552(0.060) 0.518(0.049) 0.146(0.302) 0.102(0.220) 0.688(0.240) 0.618(0.255) 0.488(0.058) 0.490(0.048)</td></tr></table>" |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "html": null, |
| "text": "QA accuracy and sufficient information score (in brackets) of each model following zero-shot evaluation on the test set in the 500 games setting. A bold value indicates a score to be the highest of that question and map type configuration.", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "html": null, |
| "text": "Question Map Type BERT BERT-10K", |
| "num": null, |
| "content": "<table><tr><td>Attribute</td><td>Fixed</td><td>0.780</td><td>0.730</td></tr><tr><td/><td>Random</td><td>0.616</td><td>0.703</td></tr><tr><td>Existence</td><td>Fixed</td><td>0.778</td><td>0.762</td></tr><tr><td/><td>Random</td><td>0.779</td><td>0.778</td></tr><tr><td>Location</td><td>Fixed</td><td>0.987</td><td>0.831</td></tr><tr><td/><td>Random</td><td>0.988</td><td>0.835</td></tr></table>" |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "html": null, |
| "text": "QA accuracy of the BERT model and the BERT-10K model on the QA validation data.", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "html": null, |
| "text": "2019. Interactive language learning by question answering. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2796-2813, Hong Kong, China. Association for Computational Linguistics.", |
| "num": null, |
| "content": "<table><tr><td>Xingdi Yuan, Marc-Alexandre C\u00f4t\u00e9, Alessandro Sor-doni, Romain Laroche, Remi Tachet des Combes, Matthew J. Hausknecht, and Adam Trischler. 2018. Counting to explore and generalize in text-based games. CoRR, abs/1806.11525.</td></tr><tr><td>Sina Zarrie\u00df, Henrik Voigt, and Simeon Sch\u00fcz. 2021. Decoding methods in neural language generation: A survey. Information, 12(9).</td></tr></table>" |
| }, |
| "TABREF8": { |
| "type_str": "table", |
| "html": null, |
| "text": "Results of Fixed Map Experiments", |
| "num": null, |
| "content": "<table><tr><td>Random</td></tr></table>" |
| }, |
| "TABREF9": { |
| "type_str": "table", |
| "html": null, |
| "text": "Results of Random Map Experiments 58", |
| "num": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |