{"id": "HJxPAFgEON", "original": "ryxaiwCM_4", "number": 8, "cdate": 1553365454817, "ddate": null, "tcdate": 1553365454817, "tmdate": 1683306289190, "tddate": null, "forum": "HJxPAFgEON", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/drlStructPred/-/Blind_Submission", "content": {"title": "Neural Program Planner for Structured Predictions", "authors": ["Jacob Biloki", "Chen Liang", "Ni Lao"], "authorids": ["bilokij@mosaix.ai", "crazydonkey@google.com", "ni.lao@mosaix.ai"], "keywords": ["Neural Networks", "Planning", "Reinforcement Learning", "Structured Prediction", "WikiTableQuestions"], "TL;DR": "A model-based planning component improves RL-based semantic parsing on WikiTableQuestions.", "abstract": "We consider the problem of weakly supervised structured prediction (SP) with reinforcement learning (RL) \u2013 for example, given a database table and a question, perform a sequence of computation actions on the table, which generates a response and receives a binary success-failure reward. This line of research has been successful by leveraging RL to directly optimizes the desired metrics of the SP tasks \u2013 for example, the accuracy in question answering or BLEU score in machine translation. However, different from the common RL settings, the environment dynamics is deterministic in SP, which hasn\u2019t been fully utilized by the model-freeRL methods that are usually applied. Since SP models usually have full access to the environment dynamics, we propose to apply model-based RL methods, which rely on planning as a primary model component. We demonstrate the effectiveness of planning-based SP with a Neural Program Planner (NPP), which, given a set of candidate programs from a pretrained search policy, decides which program is the most promising considering all the information generated from executing these programs. \nWe evaluate NPP on weakly supervised program synthesis from natural language(semantic parsing) by stacked learning a planning module based on pretrained search policies. On the WIKITABLEQUESTIONS benchmark, NPP achieves a new state-of-the-art of 47.2% accuracy.", "pdf": "/pdf/f1e73003f7ff77c8c82fd67bafcb937a784ba0ac.pdf", "paperhash": "biloki|neural_program_planner_for_structured_predictions", "venue": "drlStructPred 2019", "venueid": "ICLR.cc/2019/Workshop/drlStructPred", "_bibtex": "@misc{\nbiloki2019neural,\ntitle={Neural Program Planner for Structured Predictions},\nauthor={Jacob Biloki and Chen Liang and Ni Lao},\nyear={2019},\nurl={https://openreview.net/forum?id=HJxPAFgEON}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/drlStructPred"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/drlStructPred"], "pdate": 1554910465588, "odate": 1554910465588, "details": {"replyCount": 4}}
{"id": "S1gUCFx4dN", "original": "BJlidIdzd4", "number": 7, "cdate": 1553365454294, "ddate": null, "tcdate": 1553365454294, "tmdate": 1683306288706, "tddate": null, "forum": "S1gUCFx4dN", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/drlStructPred/-/Blind_Submission", "content": {"title": "LEARNING NEUROSYMBOLIC GENERATIVE MODELS VIA PROGRAM SYNTHESIS", "authors": ["Halley Young", "Osbert Bastani", "Mayur Naik"], "authorids": ["halleyy@seas.upenn.edu", "obastani@seas.upenn.edu", "mhnaik@seas.upenn.edu"], "keywords": ["structure", "deep learning", "generative models", "structured prediction"], "TL;DR": "Applying program synthesis to the tasks of image completion and generation within a deep learning framework", "abstract": "Significant strides have been made toward designing better generative models in recent years. Despite this progress, however, state-of-the-art approaches are still largely unable to capture complex global structure in data. For example, images of buildings typically contain spatial patterns such as windows repeating at regular intervals; state-of-the-art generative methods can\u2019t easily reproduce these structures. We propose to address this problem by incorporating programs representing global structure into the generative model\u2014e.g., a 2D for-loop may represent a configuration of windows. Furthermore, we propose a framework for learning these models by leveraging program synthesis to generate training data. \nOn both synthetic and real-world data, we demonstrate that our approach is substantially better than the state-of-the-art at both generating and completing images that contain global structure.\n", "pdf": "/pdf/303256eb2c29ab532fe9b3ebcbebebba04bfde2a.pdf", "paperhash": "young|learning_neurosymbolic_generative_models_via_program_synthesis", "venue": "drlStructPred 2019", "venueid": "ICLR.cc/2019/Workshop/drlStructPred", "_bibtex": "@misc{\nyoung2019learning,\ntitle={{LEARNING} {NEUROSYMBOLIC} {GENERATIVE} {MODELS} {VIA} {PROGRAM} {SYNTHESIS}},\nauthor={Halley Young and Osbert Bastani and Mayur Naik},\nyear={2019},\nurl={https://openreview.net/forum?id=S1gUCFx4dN}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/drlStructPred"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/drlStructPred"], "pdate": 1554910466402, "odate": 1554910466402, "details": {"replyCount": 5}}
{"id": "S1eU0KxE_4", "original": "r1eV0geoD4", "number": 6, "cdate": 1553365453762, "ddate": null, "tcdate": 1553365453762, "tmdate": 1683306288604, "tddate": null, "forum": "S1eU0KxE_4", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/drlStructPred/-/Blind_Submission", "content": {"title": "A Study of State Aliasing in Structured Prediction with RNNs", "authors": ["Layla El Asri", "Adam Trischler"], "authorids": ["layla.elasri@microsoft.com", "adam.trischler@microsoft.com"], "keywords": ["deep reinforcement learning", "structured prediction", "dialogue"], "abstract": "End-to-end reinforcement learning agents learn a state representation and a policy at the same time. Recurrent neural networks (RNNs) have been trained successfully as reinforcement learning agents in settings like dialogue that require structured prediction. In this paper, we investigate the representations learned by RNN-based agents when trained with both policy gradient and value-based methods. We show through extensive experiments and analysis that, when trained with policy gradient, recurrent neural networks often fail to learn a state representation that leads to an optimal policy in settings where the same action should be taken at different states. To explain this failure, we highlight the problem of state aliasing, which entails conflating two or more distinct states in the representation space. We demonstrate that state aliasing occurs when several states share the same optimal action and the agent is trained via policy gradient. \nWe characterize this phenomenon through experiments on a simple maze setting and a more complex text-based game, and make recommendations for training RNNs with reinforcement learning.", "pdf": "/pdf/e044c1dccc2ccc3b173a60ebabfbeb2c8672b009.pdf", "paperhash": "asri|a_study_of_state_aliasing_in_structured_prediction_with_rnns", "venue": "drlStructPred 2019", "venueid": "ICLR.cc/2019/Workshop/drlStructPred", "_bibtex": "@misc{\nasri2019a,\ntitle={A Study of State Aliasing in Structured Prediction with {RNN}s},\nauthor={Layla El Asri and Adam Trischler},\nyear={2019},\nurl={https://openreview.net/forum?id=S1eU0KxE_4}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/drlStructPred"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/drlStructPred"], "pdate": 1554910466662, "odate": 1554910466662, "details": {"replyCount": 5}}
{"id": "HJgxTf89vV", "original": "H1leRHsKDN", "number": 5, "cdate": 1552732856447, "ddate": null, "tcdate": 1552732856447, "tmdate": 1683306288445, "tddate": null, "forum": "HJgxTf89vV", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/drlStructPred/-/Blind_Submission", "content": {"title": "Learning proposals for sequential importance samplers using reinforced variational inference", "authors": ["Zafarali Ahmed", "Arjun Karuvally", "Doina Precup", "Simon Gravel"], "authorids": ["zafarali.ahmed@mail.mcgill.ca", "akaruvally@cs.umass.edu", "dprecup@cs.mcgill.ca", "simon.gravel@mcgill.ca"], "keywords": ["variational inference", "reinforcement learning", "monte carlo methods", "stochastic processes"], "abstract": "The problem of inferring unobserved values in a partially observed trajectory from a stochastic process can be considered as a structured prediction problem. Traditionally inference is conducted using heuristic-based Monte Carlo methods. This work considers learning heuristics by leveraging a connection between policy optimization reinforcement learning and approximate inference. In particular, we learn proposal distributions used in importance samplers by casting it as a variational inference problem. We then rewrite the variational lower bound as a policy optimization problem similar to Weber et al. (2015) allowing us to transfer techniques from reinforcement learning. \nWe apply this technique to a simple stochastic process as a proof-of-concept and show that while it is viable, it will require more engineering effort to scale inference for rare observations", "pdf": "/pdf/760d87e3cc1d14c32a0a9a208025d5c5ba0b0fb9.pdf", "paperhash": "ahmed|learning_proposals_for_sequential_importance_samplers_using_reinforced_variational_inference", "venue": "drlStructPred 2019", "venueid": "ICLR.cc/2019/Workshop/drlStructPred", "_bibtex": "@misc{\nahmed2019learning,\ntitle={Learning proposals for sequential importance samplers using reinforced variational inference},\nauthor={Zafarali Ahmed and Arjun Karuvally and Doina Precup and Simon Gravel},\nyear={2019},\nurl={https://openreview.net/forum?id=HJgxTf89vV}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/drlStructPred"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/drlStructPred"], "pdate": 1554910466917, "odate": 1554910466917, "details": {"replyCount": 5}}
{"id": "r1lgTGL5DE", "original": "Bye7JIPFPN", "number": 4, "cdate": 1552732855688, "ddate": null, "tcdate": 1552732855688, "tmdate": 1683306287824, "tddate": null, "forum": "r1lgTGL5DE", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/drlStructPred/-/Blind_Submission", "content": {"title": "Buy 4 REINFORCE Samples, Get a Baseline for Free!", "authors": ["Wouter Kool", "Herke van Hoof", "Max Welling"], "authorids": ["w.w.m.kool@uva.nl", "h.c.vanhoof@uva.nl", "m.welling@uva.nl"], "keywords": ["reinforce", "multiple samples", "baseline", "sequence generation", "structured prediction", "travelling salesman problem"], "TL;DR": "We show that by drawing multiple samples (predictions) per input (datapoint), we can learn with less data as we freely obtain a REINFORCE baseline.", "abstract": "REINFORCE can be used to train models in structured prediction settings to directly optimize the test-time objective. However, the common case of sampling one prediction per datapoint (input) is data-inefficient. We show that by drawing multiple samples (predictions) per datapoint, we can learn with significantly less data, as we freely obtain a REINFORCE baseline to reduce variance. Additionally we derive a REINFORCE estimator with baseline, based on sampling without replacement. \nCombined with a recent technique to sample sequences without replacement using Stochastic Beam Search, this improves the training procedure for a sequence model that predicts the solution to the Travelling Salesman Problem.", "pdf": "/pdf/f9c4b4c4221d6e1be16d74b88a4e8ad76387b95c.pdf", "paperhash": "kool|buy_4_reinforce_samples_get_a_baseline_for_free", "venue": "drlStructPred 2019", "venueid": "ICLR.cc/2019/Workshop/drlStructPred", "_bibtex": "@misc{\nkool2019buy,\ntitle={Buy 4 {REINFORCE} Samples, Get a Baseline for Free!},\nauthor={Wouter Kool and Herke van Hoof and Max Welling},\nyear={2019},\nurl={https://openreview.net/forum?id=r1lgTGL5DE}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/drlStructPred"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/drlStructPred"], "pdate": 1554910467167, "odate": 1554910467167, "details": {"replyCount": 5}}
{"id": "BJeypMU5wE", "original": "SkgtOd8twN", "number": 3, "cdate": 1552732855144, "ddate": null, "tcdate": 1552732855144, "tmdate": 1683306287776, "tddate": null, "forum": "BJeypMU5wE", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/drlStructPred/-/Blind_Submission", "content": {"title": "Multi-agent query reformulation: Challenges and the role of diversity", "authors": ["Rodrigo Nogueira", "Jannis Bulian", "Massimiliano Ciaramita"], "authorids": ["rodrigonogueira@nyu.edu", "jbulian@google.com", "massi@google.com"], "keywords": ["natural language", "reinforcement learning", "structured prediction", "multi-agent learning", "deep learning"], "TL;DR": "We use reinforcement learning for query reformulation on two tasks and surprisingly find that when training multiple agents diversity of the reformulations is more important than specialisation.", "abstract": "We investigate methods to efficiently learn diverse strategies in reinforcement learning for a generative structured prediction problem: query reformulation. In the proposed framework an agent consists of multiple specialized sub-agents and a meta-agent that learns to aggregate the answers from sub-agents to produce a final answer. Sub-agents are trained on disjoint partitions of the training data, while the meta-agent is trained on the full training set. Our method makes learning faster, because it is highly parallelizable, and has better generalization performance than strong baselines, such as\nan ensemble of agents trained on the full data. We evaluate on the tasks of document retrieval and question answering. The\nimproved performance seems due to the increased diversity of reformulation strategies. This suggests that multi-agent, hierarchical approaches might play an important role in structured prediction tasks of this kind. However, we also find that it is not obvious how to characterize diversity in this context, and a first attempt based on clustering did not produce good results. \nFurthermore, reinforcement learning for the reformulation task is hard in high-performance regimes. At best, it only marginally improves over the state of the art, which highlights the complexity of training models in this framework for end-to-end language understanding problems.", "pdf": "/pdf/79f93f4278188aca1878961668716ba4e0595662.pdf", "paperhash": "nogueira|multiagent_query_reformulation_challenges_and_the_role_of_diversity", "venue": "drlStructPred 2019", "venueid": "ICLR.cc/2019/Workshop/drlStructPred", "_bibtex": "@misc{\nnogueira2019multiagent,\ntitle={Multi-agent query reformulation: Challenges and the role of diversity},\nauthor={Rodrigo Nogueira and Jannis Bulian and Massimiliano Ciaramita},\nyear={2019},\nurl={https://openreview.net/forum?id=BJeypMU5wE}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/drlStructPred"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/drlStructPred"], "pdate": 1554910467427, "odate": 1554910467427, "details": {"replyCount": 5}}
{"id": "Syl1pGI9wN", "original": "S1xF0w5wwE", "number": 2, "cdate": 1552732854588, "ddate": null, "tcdate": 1552732854588, "tmdate": 1750552039020, "tddate": null, "forum": "Syl1pGI9wN", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/drlStructPred/-/Blind_Submission", "content": {"title": "Connecting the Dots Between MLE and RL for Sequence Generation", "authors": ["Bowen Tan*", "Zhiting Hu*", "Zichao Yang", "Ruslan Salakhutdinov", "Eric P. Xing"], "authorids": ["bwkevintan@gmail.com", "zhitinghu@gmail.com", "yangtze2301@gmail.com", "rsalakhu@cs.cmu.edu", "eric.xing@petuum.com"], "keywords": ["sequence generation", "maximum likelihood learning", "reinforcement learning", "policy optimization", "text generation", "reward augmented maximum likelihood", "exposure bias"], "TL;DR": "A unified perspective of various learning algorithms for sequence generation, such as MLE, RL, RAML, data noising, etc.", "abstract": "Sequence generation models such as recurrent networks can be trained with a diverse set of learning algorithms. For example, maximum likelihood learning is simple and efficient, yet suffers from the exposure bias problem. Reinforcement learning like policy gradient addresses the problem but can have prohibitively poor exploration efficiency. A variety of other algorithms such as RAML, SPG, and data noising, have also been developed in different perspectives. This paper establishes a formal connection between these algorithms. We present a generalized entropy regularized policy optimization formulation, and show that the apparently divergent algorithms can all be reformulated as special instances of the framework, with the only difference being the configurations of reward function and a couple of hyperparameters. The unified interpretation offers a systematic view of the varying properties of exploration and learning efficiency. \nBesides, based on the framework, we present a new algorithm that dynamically interpolates among the existing algorithms for improved learning. Experiments on machine translation and text summarization demonstrate the superiority of the proposed algorithm.", "pdf": "/pdf/81d5d9ba2a559a383d5c1752e96c74ea9c43ba6f.pdf", "paperhash": "tan|connecting_the_dots_between_mle_and_rl_for_sequence_generation", "venue": "drlStructPred 2019", "venueid": "ICLR.cc/2019/Workshop/drlStructPred", "_bibtex": "@misc{\ntan*2019connecting,\ntitle={Connecting the Dots Between {MLE} and {RL} for Sequence Generation},\nauthor={Bowen Tan* and Zhiting Hu* and Zichao Yang and Ruslan Salakhutdinov and Eric P. Xing},\nyear={2019},\nurl={https://openreview.net/forum?id=Syl1pGI9wN}\n}", "community_implementations": "[ 1 code implementation](https://www.catalyzex.com/paper/connecting-the-dots-between-mle-and-rl-for/code)"}, "signatures": ["ICLR.cc/2019/Workshop/drlStructPred"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/drlStructPred"], "pdate": 1554910467685, "odate": 1554910467685, "details": {"replyCount": 6}}
{"id": "Bke03G85DN", "original": "Hklld2oMwN", "number": 1, "cdate": 1552732854009, "ddate": null, "tcdate": 1552732854009, "tmdate": 1683306287655, "tddate": null, "forum": "Bke03G85DN", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/drlStructPred/-/Blind_Submission", "content": {"title": "Robust Reinforcement Learning for Autonomous Driving ", "authors": ["Yesmina Jaafra", "Jean Luc Laurent", "Aline Deruyver", "Mohamed Saber Naceur"], "authorids": ["yasmina.jaafra@etu.unistra.fr", "jeanluc.laurent@segula.fr", "aline.deruyver@unistra.fr", "naceurs@yahoo.fr"], "keywords": ["Neural networks", "Deep reinforcement learning", "Actor-critic model", "Autonomous driving", "Carla simulator"], "TL;DR": "An actor-critic reinforcement learning approach with multi-step returns applied to autonomous driving with Carla simulator.", "abstract": "Autonomous driving is still considered as an \u201cunsolved problem\u201d given its inherent important variability and that many processes associated with its development like vehicle control and scenes recognition remain open issues. Despite reinforcement learning algorithms have achieved notable results in games and some robotic manipulations, this technique has not been widely scaled up to the more challenging real world applications like autonomous driving. In this work, we propose a deep reinforcement learning (RL) algorithm embedding an actor critic architecture with multi-step returns to achieve a better robustness of the agent learning strategies when acting in complex and unstable environments. The experiment is conducted with Carla simulator offering a customizable and realistic urban driving conditions. \nThe developed deep actor RL guided by a policy-evaluator critic distinctly surpasses the performance of a standard deep RL agent.", "pdf": "/pdf/397d01657f5dd4128902aaa36b44f1680c30bdda.pdf", "paperhash": "jaafra|robust_reinforcement_learning_for_autonomous_driving", "venue": "drlStructPred 2019", "venueid": "ICLR.cc/2019/Workshop/drlStructPred", "_bibtex": "@misc{\njaafra2019robust,\ntitle={Robust Reinforcement Learning for Autonomous Driving },\nauthor={Yesmina Jaafra and Jean Luc Laurent and Aline Deruyver and Mohamed Saber Naceur},\nyear={2019},\nurl={https://openreview.net/forum?id=Bke03G85DN}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/drlStructPred"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/drlStructPred"], "pdate": 1554910466143, "odate": 1554910466143, "details": {"replyCount": 5}}