diff --git "a/iclr2019_withdrawn_submission.jsonl" "b/iclr2019_withdrawn_submission.jsonl" new file mode 100644--- /dev/null +++ "b/iclr2019_withdrawn_submission.jsonl" @@ -0,0 +1,160 @@ +{"id": "SygjB3AcYX", "original": "ryeveo65tQ", "number": 1575, "cdate": 1538088003278, "ddate": null, "tcdate": 1538088003278, "tmdate": 1750551531910, "tddate": null, "forum": "SygjB3AcYX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Generalized Label Propagation Methods for Semi-Supervised Learning", "abstract": "The key challenge in semi-supervised learning is how to effectively leverage unlabeled data to improve learning performance. The classical label propagation method, despite its popularity, has limited modeling capability in that it only exploits graph information for making predictions. In this paper, we consider label propagation from a graph signal processing perspective and decompose it into three components: signal, filter, and classifier. By extending the three components, we propose a simple generalized label propagation (GLP) framework for semi-supervised learning. GLP naturally integrates graph and data feature information, and offers the flexibility of selecting appropriate filters and domain-specific classifiers for different applications. Interestingly, GLP also provides new insight into the popular graph convolutional network and elucidates its working mechanisms. 
Extensive experiments on three citation networks, one knowledge graph, and one image dataset demonstrate the efficiency and effectiveness of GLP.", "keywords": ["semi-supervised learning", "label propagation", "graph convolutional networks"], "authorids": ["csqmli@comp.polyu.edu.hk", "xiao-ming.wu@polyu.edu.hk", "zcguan@zju.edu.cn"], "authors": ["Qimai Li", "Xiao-Ming Wu", "Zhichao Guan."], "pdf": "/pdf/cfb7db7f0a214bfcbc0555687fd2b2cc076a762f.pdf", "paperhash": "li|generalized_label_propagation_methods_for_semisupervised_learning", "TL;DR": "We extend the classical label propation methods to jointly model graph and feature information from a graph filtering perspective, and show connections to the graph convlutional networks.", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/generalized-label-propagation-methods-for/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538088003303, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 
1539179226045, "type": "note"}}} +{"id": "HyesB2RqFQ", "original": "H1e8r1CcY7", "number": 1574, "cdate": 1538088003111, "ddate": null, "tcdate": 1538088003111, "tmdate": 1683306265918, "tddate": null, "forum": "HyesB2RqFQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Bridging HMMs and RNNs through Architectural Transformations", "abstract": "A distinct commonality between HMMs and RNNs is that they both learn hidden representations for sequential data. In addition, it has been noted that the backward computation of the Baum-Welch algorithm for HMMs is a special case of the back propagation algorithm used for neural networks (Eisner (2016)). Do these observations suggest that, despite their many apparent differences, HMMs are a special case of RNNs? In this paper, we investigate a series of architectural transformations between HMMs and RNNs, both through theoretical derivations and empirical hybridization, to answer this question. In particular, we investigate three key design factors\u2014independence assumptions between the hidden states and the observation, the placement of softmax, and the use of non-linearity\u2014in order to pin down their empirical effects. We present a comprehensive empirical study to provide insights on the interplay between expressivity and interpretability with respect to language modeling and parts-of-speech induction. ", "keywords": ["rnns", "hmms", "latent variable models", "language modelling", "interpretability", "sequence modelling"], "authorids": ["jbuys@cs.washington.edu", "ybisk@yonatanbisk.com", "yejin@cs.washington.edu"], "authors": ["Jan Buys", "Yonatan Bisk", "Yejin Choi"], "TL;DR": "Are HMMs a special case of RNNs? 
We investigate a series of architectural transformations between HMMs and RNNs, both through theoretical derivations and empirical hybridization and provide new insights.", "pdf": "/pdf/b486ea654c05277b3fdf1e894bf11956a5b8ca77.pdf", "paperhash": "buys|bridging_hmms_and_rnns_through_architectural_transformations"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538088003135, "details": {"replyCount": 11, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJxiHnCqKQ", "original": "HylNAGa9YX", "number": 1571, "cdate": 1538088002598, "ddate": null, "tcdate": 1538088002598, "tmdate": 1683306265814, "tddate": null, "forum": "SJxiHnCqKQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "MCTSBug: Generating Adversarial Text Sequences via Monte Carlo Tree Search and Homoglyph Attack", "abstract": "Crafting adversarial examples on discrete inputs like text sequences is fundamentally different from generating such examples for continuous inputs like images. 
This paper tries to answer the question: under a black-box setting, can we create adversarial examples automatically to effectively fool deep learning classifiers on texts by making imperceptible changes? Our answer is a firm yes. Previous efforts mostly replied on using gradient evidence, and they are less effective either due to finding the nearest neighbor word (wrt meaning) automatically is difficult or relying heavily on hand-crafted linguistic rules. We, instead, use Monte Carlo tree search (MCTS) for finding the most important few words to perturb and perform homoglyph attack by replacing one character in each selected word with a symbol of identical shape. Our novel algorithm, we call MCTSBug, is black-box and extremely effective at the same time. Our experimental results indicate that MCTSBug can fool deep learning classifiers at the success rates of 95% on seven large-scale benchmark datasets, by perturbing only a few characters. Surprisingly, MCTSBug, without relying on gradient information at all, is more effective than the gradient-based white-box baseline. Thanks to the nature of homoglyph attack, the generated adversarial perturbations are almost imperceptible to human eyes. 
", "keywords": ["Adversarial sample", "Text", "Black-box", "MCTS", "Homoglyph"], "authorids": ["jg6yd@virginia.edu", "jjl5sw@virginia.edu", "yanjun@virginia.edu"], "authors": ["Ji Gao", "Jack Lanchantin", "Yanjun Qi"], "TL;DR": "Use Monte carlo Tree Search and Homoglyphs to generate indistinguishable adversarial samples on text data", "pdf": "/pdf/3b7dc1da510d6522620e1c54f017587e13438f3a.pdf", "paperhash": "gao|mctsbug_generating_adversarial_text_sequences_via_monte_carlo_tree_search_and_homoglyph_attack"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538088002623, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJfcrn0qKX", "original": "ryxdpGCctQ", "number": 1569, "cdate": 1538088002249, "ddate": null, "tcdate": 1538088002249, "tmdate": 1750551532110, "tddate": null, "forum": "SJfcrn0qKX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Realistic Adversarial Examples in 3D Meshes", "abstract": "Highly expressive 
models especially deep neural networks (DNNs) have been widely applied to various applications and achieved increasing success. However, recent studies show that such machine learning models appear to be vulnerable against adversarial examples. So far adversarial examples have been heavily explored for 2D images, while few work has tried to understand the vulnerabilities of 3D objects which exist in real world, where 3D objects are projected to 2D domains by photo taking for different learning (recognition) tasks. In this paper we consider adversarial behaviors in practical scenarios by manipulating the shape and texture of a given 3D mesh representation of an object. Our goal is to project the optimized \"adversarial meshes\" to 2D with photo-realistic rendering engine, and still able to mislead different machine learning models.\nExtensive experiments show that by generating unnoticeable 3D adversarial perturbation on shape or texture for a 3D mesh, the corresponding projected 2D instance can either lead classifiers to misclassify the victim object arbitrary malicious target, or hide any target object within the scene from state-of-the-art object detectors. We conduct human studies to show that our optimized adversarial 3D perturbation is highly unnoticeable for human vision systems. In addition to the subtle perturbation on a given 3D mesh, we also propose to synthesize a realistic 3D mesh to put in a scene mimicking similar rendering conditions and therefore attack existing objects within it. In-depth analysis for transferability among different 3D rendering engines and vulnerable regions of meshes are provided to help better understand adversarial behaviors in practice and motivate potential defenses. 
", "keywords": [], "authorids": ["xiaocw@umich.edu", "ydawei@umich.edu", "lxbosky@gmail.com", "jiadeng@cs.princeton.edu", "mingyan@umich.edu"], "authors": ["Chaowei Xiao", "Dawei Yang", "Bo Li", "Jia Deng", "Mingyan Liu"], "pdf": "/pdf/03109e809e97c0af6b7d9c29fe232b5e3e5eca25.pdf", "paperhash": "xiao|realistic_adversarial_examples_in_3d_meshes", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/realistic-adversarial-examples-in-3d-meshes/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538088002274, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJlPB2CqYQ", "original": "B1xCqVR5K7", "number": 1548, "cdate": 1538087998639, "ddate": null, "tcdate": 1538087998639, "tmdate": 1683306265595, "tddate": null, "forum": "HJlPB2CqYQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "ISONETRY : GEOMETRY OF CRITICAL INITIALIZATIONS AND TRAINING", 
"abstract": "Recent work on critical initializations of deep neural networks has shown that by constraining the spectrum of input-output Jacobians allows for fast training of very deep networks without skip connections. The current understanding of this class of initializations is limited with respect to classical notions from optimization. In particular, the connections between Jacobian eigenvalues and curvature of the parameter space are unknown. Similarly, there is no firm understanding of the effects of maintaining orthogonality during training. With this work we complement the existing understanding of critical initializations and show that the curvature is proportional to the maximum singular value of the Jacobian. Furthermore we show that optimization under orthogonality constraints ameliorates the dependence on choice of initial parameters, but is not strictly necessary.", "keywords": ["Deep learning"], "authorids": ["piotr.sokol@stonybrook.edu", "memming.park@stonybrook.edu"], "authors": ["Piotr A Sokol", "Il Memming Park"], "pdf": "/pdf/207f8f6b3a8286498b9a632bdca7962730e5fd69.pdf", "paperhash": "sokol|isonetry_geometry_of_critical_initializations_and_training"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087998666, "details": {"replyCount": 1, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": 
["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "Sye8S209KX", "original": "SylGGIFQFm", "number": 1542, "cdate": 1538087997607, "ddate": null, "tcdate": 1538087997607, "tmdate": 1683306265490, "tddate": null, "forum": "Sye8S209KX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Learning Robust, Transferable Sentence Representations for Text Classification", "abstract": "Despite deep recurrent neural networks (RNNs) demonstrate strong performance in text classification, training RNN models are often expensive and requires an extensive collection of annotated data which may not be available. To overcome the data limitation issue, existing approaches leverage either pre-trained word embedding or sentence representation to lift the burden of training RNNs from scratch. In this paper, we show that jointly learning sentence representations from multiple text classification tasks and combining them with pre-trained word-level and sentence level encoders result in robust sentence representations that are useful for transfer learning. 
Extensive experiments and analyses using a wide range of transfer and linguistic tasks endorse the effectiveness of our approach.", "keywords": ["sentence representations learning", "multi-task learning", "transfer learning"], "authorids": ["wasiahmad@cs.ucla.edu", "xubai@cs.stonybrook.edu", "npeng@isi.edu", "kwchang@cs.ucla.edu"], "authors": ["Wasi Uddin Ahmad", "Xueying Bai", "Nanyun Peng", "Kai-Wei Chang"], "pdf": "/pdf/a248efaf6ed8b2725113da8e16542af15cff0534.pdf", "paperhash": "ahmad|learning_robust_transferable_sentence_representations_for_text_classification"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087997632, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1MmH30cY7", "original": "rJx5fJ_cY7", "number": 1529, "cdate": 1538087995320, "ddate": null, "tcdate": 1538087995320, "tmdate": 1683306265485, "tddate": null, "forum": "r1MmH30cY7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "NA", 
"abstract": "NA", "keywords": [], "authorids": ["na@na.edu"], "authors": ["NA"], "TL;DR": "NA", "pdf": "/pdf/83c1b0d66eb83e66791f177659e9cf3fc3f26ce3.pdf", "paperhash": "na|na"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087995348, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HylXHhA9Km", "original": "H1eY2SC5Y7", "number": 1528, "cdate": 1538087995145, "ddate": null, "tcdate": 1538087995145, "tmdate": 1683306265485, "tddate": null, "forum": "HylXHhA9Km", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Statistical Characterization of Deep Neural Networks and their Sensitivity", "abstract": "Despite their ubiquity, it remains an active area of research to fully understand deep neural networks (DNNs) and the reasons of their empirical success. We contribute to this effort by introducing a principled approach to statistically characterize DNNs and their sensitivity. 
By distinguishing between randomness from input data and from model parameters, we study how central and non-central moments of network activation and sensitivity evolve during propagation. Thereby, we provide novel statistical insights on the hypothesis space of input-output mappings encoded by different architectures. Our approach applies both to fully-connected and convolutional networks and incorporates most ingredients of modern DNNs: rectified linear unit (ReLU) activation, batch normalization, skip connections.", "keywords": ["Statistics", "Sensitivity", "Exploding Gradient", "Convolutional Neural Networks", "Residual Neural Networks", "Batch Normalization"], "authorids": ["antoine.labatie@gmail.com"], "authors": ["Antoine Labatie"], "pdf": "/pdf/6c596efd134a2c1efb646443780339bb5c3b1f93.pdf", "paperhash": "labatie|statistical_characterization_of_deep_neural_networks_and_their_sensitivity"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087995168, "details": {"replyCount": 2, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 
1539179226045, "type": "note"}}} +{"id": "ryxfHnCctX", "original": "SJxw9S09Fm", "number": 1521, "cdate": 1538087993938, "ddate": null, "tcdate": 1538087993938, "tmdate": 1683306265318, "tddate": null, "forum": "ryxfHnCctX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "A Main/Subsidiary Network Framework for Simplifying Binary Neural Networks", "abstract": "To reduce memory footprint and run-time latency, techniques such as neural net-work pruning and binarization have been explored separately. However, it is un-clear how to combine the best of the two worlds to get extremely small and efficient models. In this paper, we, for the first time, define the filter-level pruning problem for binary neural networks, which cannot be solved by simply migrating existing structural pruning methods for full-precision models. A novel learning-based approach is proposed to prune filters in our main/subsidiary network frame-work, where the main network is responsible for learning representative features to optimize the prediction performance, and the subsidiary component works as a filter selector on the main network. To avoid gradient mismatch when training the subsidiary component, we propose a layer-wise and bottom-up scheme. We also provide the theoretical and experimental comparison between our learning-based and greedy rule-based methods. Finally, we empirically demonstrate the effectiveness of our approach applied on several binary models, including binarizedNIN, VGG-11, and ResNet-18, on various image classification datasets. 
For bi-nary ResNet-18 on ImageNet, we use 78.6% filters but can achieve slightly better test error 49.87% (50.02%-0.15%) than the original model", "paperhash": "xu|a_mainsubsidiary_network_framework_for_simplifying_binary_neural_networks", "TL;DR": "we define the filter-level pruning problem for binary neural networks for the first time and propose method to solve it.", "authorids": ["justimyhxu@zju.edu.cn", "xindong@g.harvard.edu", "daniellee2519@gmail.com", "haosu@eng.ucsd.edu"], "authors": ["Yinghao Xu", "Xin Dong", "Yudian Li", "Hao Su"], "keywords": ["efficient machine learning\uff0cbinary neural network"], "pdf": "/pdf/4740fa32f0d74ea3030758268df328c1383f844c.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087993963, "details": {"replyCount": 2, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rkMlSnAqYX", "original": "SklHV92qFX", "number": 1512, "cdate": 1538087992375, "ddate": null, "tcdate": 1538087992375, "tmdate": 1683306265212, "tddate": null, "forum": "rkMlSnAqYX", "replyto": 
null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Mitigating Bias in Natural Language Inference Using Adversarial Learning", "abstract": "Recognizing the relationship between two texts is an important aspect of natural language understanding (NLU), and a variety of neural network models have been proposed for solving NLU tasks. Unfortunately, recent work showed that the datasets these models are trained on often contain biases that allow models to achieve non-trivial performance without possibly learning the relationship between the two texts. We propose a framework for building robust models by using adversarial learning to encourage models to learn latent, bias-free representations. We test our approach in a Natural Language Inference (NLI) scenario, and show that our adversarially-trained models learn robust representations that ignore known dataset-specific biases. Our experiments demonstrate that our models are more robust to new NLI datasets. ", "keywords": ["natural language inference", "adversarial learning", "bias", "artifacts"], "authorids": ["belinkov@seas.harvard.edu", "azpoliak@cs.jhu.edu", "shieber@seas.harvard.edu", "vandurme@cs.jhu.edu"], "authors": ["Yonatan Belinkov", "Adam Poliak", "Stuart M. 
Shieber", "Benjamin Van Durme"], "TL;DR": "Adversarial learning methods encourage NLI models to ignore dataset-specific biases and help models transfer across datasets.", "pdf": "/pdf/83b96d120c3a685a96940999f8a01601e1c433f6.pdf", "paperhash": "belinkov|mitigating_bias_in_natural_language_inference_using_adversarial_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087992399, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1x0E2C5tQ", "original": "HkxFMW39FX", "number": 1499, "cdate": 1538087990170, "ddate": null, "tcdate": 1538087990170, "tmdate": 1683306265155, "tddate": null, "forum": "B1x0E2C5tQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "What Is in a Translation Unit? 
Comparing Character and Subword Representations Beyond Translation", "abstract": "Recent work has shown that contextualized word representations derived from neural machine translation (NMT) are a viable alternative to such from simple word predictions tasks. This is because the internal understanding that needs to be built in order to be able to translate from one language to another is much more comprehensive. Unfortunately, computational and memory limitations as of present prevent NMT models from using large word vocabularies, and thus alternatives such as subword units (BPE and morphological segmentations) and characters have been used. Here we study the impact of using different kinds of units on the quality of the resulting representations when used to model syntax, semantics, and morphology. We found that while representations derived from subwords are slightly better for modeling syntax, character-based representations are superior for modeling morphology and are also more robust to noisy input.", "keywords": ["subwords", "representations", "word embeddings", "transfer learning", "machine translation", "natural language processing"], "authorids": ["ndurrani@qf.org.qa", "faimaduddin@qf.org.qa", "hsajjad@qf.org.qa", "belinkov@mit.edu", "pnakov@hbku.edu.qa"], "authors": ["Nadir Durrani", "Fahim Dalvi", "Hassan Sajjad", "Yonatan Belinkov", "Preslav Nakov"], "TL;DR": "We study the impact of using different kinds of subword units on the quality of the resulting representations when used to model syntax, semantics, and morphology.", "pdf": "/pdf/2d312fa4a5b420f25d1e46c9044bfc739d1d4c88.pdf", "paperhash": "durrani|what_is_in_a_translation_unit_comparing_character_and_subword_representations_beyond_translation"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087990194, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", 
"rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1gTE2AcKQ", "original": "S1gakg0qKQ", "number": 1494, "cdate": 1538087989300, "ddate": null, "tcdate": 1538087989300, "tmdate": 1683306265015, "tddate": null, "forum": "B1gTE2AcKQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "From Amortised to Memoised Inference: Combining Wake-Sleep and Variational-Bayes for Unsupervised Few-Shot Program Learning", "abstract": "Given a large database of concepts but only one or a few examples of each, can we learn models for each concept that are not only generalisable, but interpretable? In this work, we aim to tackle this problem through hierarchical Bayesian program induction. We present a novel learning algorithm which can infer concepts as short, generative, stochastic programs, while learning a global prior over programs to improve generalisation and a recognition network for efficient inference. Our algorithm, Wake-Sleep-Remember (WSR), combines gradient learning for continuous parameters with neurally-guided search over programs. 
We show that WSR learns compelling latent programs in two tough symbolic domains: cellular automata and Gaussian process kernels. We also collect and evaluate on a new dataset, Text-Concepts, for discovering structured patterns in natural text data.", "keywords": ["wake-sleep", "variational", "amortised inference", "hierarchical bayes", "program learning"], "authorids": ["lbh@mit.edu", "jbt@mit.edu"], "authors": ["Luke B. Hewitt", "Joshua B. Tenenbaum"], "TL;DR": "We extend the wake-sleep algorithm and use it to learn to learn structured models from few examples, ", "pdf": "/pdf/6b4afd038aded2639886e8adf9c9fb2b4f1c7dfe.pdf", "paperhash": "hewitt|from_amortised_to_memoised_inference_combining_wakesleep_and_variationalbayes_for_unsupervised_fewshot_program_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087989339, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJeq43AqF7", "original": "BJe4d2nqYm", "number": 1477, "cdate": 1538087986316, "ddate": null, 
"tcdate": 1538087986316, "tmdate": 1750551532390, "tddate": null, "forum": "HJeq43AqF7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Unsupervised Latent Tree Induction with Deep Inside-Outside Recursive Auto-Encoders ", "abstract": "Syntax is a powerful abstraction for language understanding. Many downstream tasks require segmenting input text into meaningful constituent chunks (e.g., noun phrases or entities); more generally, models for learning semantic representations of text benefit from integrating syntax in the form of parse trees (e.g., tree-LSTMs). Supervised parsers have traditionally been used to obtain these trees, but lately interest has increased in unsupervised methods that induce syntactic representations directly from unlabeled text. To this end, we propose the deep inside-outside recursive autoencoder (DIORA), a fully-unsupervised method for discovering syntax that simultaneously learns representations for constituents within the induced tree. Unlike many prior approaches, DIORA does not rely on supervision from auxiliary downstream tasks and is thus not constrained to particular domains. Furthermore, competing approaches do not learn explicit phrase representations along with tree structures, which limits their applicability to phrase-based tasks. Extensive experiments on unsupervised parsing, segmentation, and phrase clustering demonstrate the efficacy of our method. 
DIORA achieves the state of the art in unsupervised parsing (46.9 F1) on the benchmark WSJ dataset.", "keywords": ["latent-tree-learning", "unsupervised-parsing"], "authorids": ["adrozdov@cs.umass.edu", "pat@cs.umass.edu", "ymohit@cs.umass.edu", "miyyer@cs.umass.edu", "mccallum@cs.umass.edu"], "authors": ["Andrew Drozdov", "Patrick Verga", "Mohit Yadev", "Mohit Iyyer", "Andrew McCallum"], "TL;DR": "In this work we propose deep inside-outside recursive auto-encoders(DIORA) a fully unsupervised method of discovering syntax while simultaneously learning representations for discovered constituents. ", "pdf": "/pdf/c302a86a9f11dd4707d605f7a2e3afc5b5d1e3b7.pdf", "paperhash": "drozdov|unsupervised_latent_tree_induction_with_deep_insideoutside_recursive_autoencoders", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/unsupervised-latent-tree-induction-with-deep/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087986339, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, 
"cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "ryxtE3C5Fm", "original": "rkxvw-R5KQ", "number": 1469, "cdate": 1538087984768, "ddate": null, "tcdate": 1538087984768, "tmdate": 1750551532421, "tddate": null, "forum": "ryxtE3C5Fm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "From Adversarial Training to Generative Adversarial Networks", "abstract": "In this paper, we are interested in two seemingly different concepts: \\textit{adversarial training} and \\textit{generative adversarial networks (GANs)}. Particularly, how these techniques work to improve each other. To this end, we analyze the limitation of adversarial training as a defense method, starting from questioning how well the robustness of a model can generalize. Then, we successfully improve the generalizability via data augmentation by the ``fake'' images sampled from generative adversarial network. After that, we are surprised to see that the resulting robust classifier leads to a better generator, for free. We intuitively explain this interesting phenomenon and leave the theoretical analysis for future work.\nMotivated by these observations, we propose a system that combines generator, discriminator, and adversarial attacker together in a single network. After end-to-end training and fine tuning, our method can simultaneously improve the robustness of classifiers, measured by accuracy under strong adversarial attacks, and the quality of generators, evaluated both aesthetically and quantitatively. 
In terms of the classifier, we achieve better robustness than the state-of-the-art adversarial training algorithm proposed in (Madry \\textit{et al.}, 2017), while our generator achieves competitive performance compared with SN-GAN (Miyato and Koyama, 2018).", "keywords": ["adversarial training", "conditional GAN"], "authorids": ["xqliu@cs.ucla.edu", "chohsieh@cs.ucla.edu"], "authors": ["Xuanqing Liu", "Cho-Jui Hsieh"], "TL;DR": "We found adversarial training not only speeds up the GAN training but also increases the image quality", "pdf": "/pdf/a79fb08d697cc5661f91d1953c42fe0d1d350d2d.pdf", "paperhash": "liu|from_adversarial_training_to_generative_adversarial_networks", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/from-adversarial-training-to-generative/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087984795, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HklUN3RcFX", 
"original": "B1xMC2h9Fm", "number": 1456, "cdate": 1538087982488, "ddate": null, "tcdate": 1538087982488, "tmdate": 1750551532472, "tddate": null, "forum": "HklUN3RcFX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Confidence-based Graph Convolutional Networks for Semi-Supervised Learning", "abstract": "Predicting properties of nodes in a graph is an important problem with applications in a variety of domains. Graph-based Semi Supervised Learning (SSL) methods aim to address this problem by labeling a small subset of the nodes as seeds, and then utilizing the graph structure to predict label scores for the rest of the nodes in the graph. Recently, Graph Convolutional Networks (GCNs) have achieved impressive performance on the graph-based SSL task. In addition to label scores, it is also desirable to have a confidence score associated with them. Unfortunately, confidence estimation in the context of GCN has not been previously explored. We fill this important gap in this paper and propose ConfGCN, which estimates labels scores along with their confidences jointly in GCN-based setting. ConfGCN uses these estimated confidences to determine the influence of one node on another during neighborhood aggregation, thereby acquiring anisotropic capabilities. Through extensive analysis and experiments on standard benchmarks, we find that ConfGCN is able to significantly outperform state-of-the-art baselines. 
We have made ConfGCN\u2019s source code available to encourage reproducible research.", "keywords": ["Graph Convolutional Networks", "GCN", "Confidence", "Semi-Supervised Learning", "Deep Learning", "Neural Networks"], "authorids": ["shikhar@iisc.ac.in", "prateekyadav@iisc.ac.in", "mbbhandarimanik@gmail.com", "ppt@iisc.ac.in"], "authors": ["Shikhar Vashishth", "Prateek Yadav", "Manik Bhandari", "Partha Talukdar"], "TL;DR": "We propose a confidence based Graph Convolutional Network for Semi-Supervised Learning.", "pdf": "/pdf/6951b9755540a31c3f1e9194e4febe3db9b3d18c.pdf", "paperhash": "vashishth|confidencebased_graph_convolutional_networks_for_semisupervised_learning", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/confidence-based-graph-convolutional-networks/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087982513, "details": {"replyCount": 3, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HkzL4hR9Ym", 
"original": "HylpAbC9FQ", "number": 1454, "cdate": 1538087982146, "ddate": null, "tcdate": 1538087982146, "tmdate": 1683306264632, "tddate": null, "forum": "HkzL4hR9Ym", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Shaping representations through communication", "abstract": "Good representations facilitate transfer learning and few-shot learning. Motivated by theories of language and communication that explain why communities with large number of speakers have, on average, simpler languages with more regularity, we cast the representation learning problem in terms of learning to communicate. Our starting point sees traditional autoencoders as a single encoder with a fixed decoder partner that must learn to communicate. Generalizing from there, we introduce community-based autoencoders in which multiple encoders and decoders collectively learn representations by being randomly paired up on successive training iterations. Our experiments show that increasing community sizes reduce idiosyncrasies in the learned codes, resulting in more invariant representations with increased reusability and structure.", "keywords": ["communication", "language", "representation learning", "autoencoders"], "authorids": ["tieleman@google.com", "angeliki@google.com", "shibl@google.com", "cblundell@google.com", "doinap@google.com"], "authors": ["Olivier Tieleman", "Angeliki Lazaridou", "Shibl Mourad", "Charles Blundell", "Doina Precup"], "TL;DR": "Motivated by theories of language and communication, we introduce community-based autoencoders, in which multiple encoders and decoders collectively learn structured and reusable representations.", "pdf": "/pdf/9d214dc113ef31d4b8071ed6b3ace7e96f05d757.pdf", "paperhash": "tieleman|shaping_representations_through_communication"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087982171, "details": 
{"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1lf43A5Y7", "original": "rke_052ctQ", "number": 1428, "cdate": 1538087977602, "ddate": null, "tcdate": 1538087977602, "tmdate": 1683306264467, "tddate": null, "forum": "B1lf43A5Y7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "How to learn (and how not to learn) multi-hop reasoning with memory networks", "abstract": "Answering questions about a text frequently requires aggregating information from multiple places in that text. End-to-end neural network models, the dominant approach in the current literature, can theoretically learn how to distill and manipulate representations of the text without explicit supervision about how to do so. We investigate a canonical architecture for this task, the memory network, and analyze how effective it really is in the context of three multi-hop reasoning settings. 
In a simple synthetic setting, the path-finding task of the bAbI dataset, the model fails to learn the correct reasoning without additional supervision of its attention mechanism. However, with this supervision, it can perform well. On a real text dataset, WikiHop, the memory network gives nearly state-of-the-art performance, but does so without using its multi-hop capabilities. A tougher anonymized version of the WikiHop dataset is qualitatively similar to bAbI: the model fails to perform well unless it has additional supervision. We hypothesize that many \"multi-hop\" architectures do not truly learn this reasoning as advertised, though they could learn this reasoning if appropriately supervised.", "keywords": ["NLP", "Reading Comprehension", "Memory Networks", "Multi-hop Reasoning"], "authorids": ["jf_chen@utexas.edu", "gdurrett@cs.utexas.edu"], "authors": ["Jifan Chen", "Greg Durrett"], "TL;DR": "Memory Networks do not learn multi-hop reasoning unless we supervise them.", "pdf": "/pdf/efe68d4f90d5ae5c7fc90e811b84fff053c5de24.pdf", "paperhash": "chen|how_to_learn_and_how_not_to_learn_multihop_reasoning_with_memory_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087977629, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": 
["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SylWNnA5FQ", "original": "rJgn8GR9t7", "number": 1426, "cdate": 1538087977247, "ddate": null, "tcdate": 1538087977247, "tmdate": 1683306264321, "tddate": null, "forum": "SylWNnA5FQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Program Synthesis with Learned Code Idioms", "abstract": "Program synthesis of general-purpose source code from natural language specifi-\ncations is challenging due to the need to reason about high-level patterns in the\ntarget program and low-level implementation details at the same time. In this work,\nwe present PATOIS , the first system that allows a neural program synthesizer to\nexplicitly interleave high-level and low-level reasoning at every generation step. 
It\naccomplishes this by automatically mining common code idioms from a given cor-\npus and then incorporating them into the underlying language for neural synthesis.\nWe evaluate PATOIS on a challenging program synthesis dataset NAPS and show\nthat using learned code idioms improves the synthesizer\u2019s accuracy.", "keywords": ["program synthesis", "semantic parsing", "code idioms", "domain-specific languages"], "authorids": ["ricshin@cs.berkeley.edu", "mabrocks@microsoft.com", "miallama@microsoft.com", "polozov@microsoft.com"], "authors": ["Richard Shin", "Marc Brockschmidt", "Miltiadis Allamanis", "Oleksandr Polozov"], "pdf": "/pdf/4df46d32f166659647231f7afcda8c5bc453cf07.pdf", "paperhash": "shin|program_synthesis_with_learned_code_idioms"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087977275, "details": {"replyCount": 1, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SkeZEhR5FQ", "original": "HJxVnVfcKX", "number": 1423, "cdate": 1538087976732, "ddate": null, "tcdate": 1538087976732, 
"tmdate": 1683306264252, "tddate": null, "forum": "SkeZEhR5FQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Learning Graph Decomposition", "abstract": "We propose a novel end-to-end trainable framework for the graph decomposition problem. The minimum cost multicut problem is first converted to an unconstrained binary cubic formulation where cycle consistency constraints are incorporated into the objective function. The new optimization problem can be viewed as a Conditional Random Field (CRF) in which the random variables are associated with the binary edge labels of the initial graph and the hard constraints are introduced in the CRF as high-order potentials. The parameters of a standard Neural Network and the fully differentiable CRF can be optimized in an end-to-end manner. We demonstrate the proposed learning algorithm in the context of clustering of hand written digits, particularly in a setting where no direct supervision for the graph decomposition task is available, and multiple person pose estimation from images in the wild. 
The experiments validate the effectiveness of our approach both for the feature learning and for the final clustering task.", "keywords": ["multicut graph decomposition", "optimization by learning", "pose estimation", "clustering"], "authorids": ["jsong@inf.ethz.ch", "bjoern.andres@de.bosch.com", "black@tuebingen.mpg.de", "otmar.hilliges@inf.ethz.ch", "stang@tuebingen.mpg.de"], "authors": ["Jie Song", "Bjoern Andres", "Michael Black", "Otmar Hilliges", "Siyu Tang"], "pdf": "/pdf/4a6c48b7a47e057623462da1e3110782a9d9b98d.pdf", "paperhash": "song|learning_graph_decomposition"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087976758, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJeyV2AcKX", "original": "HJgF2pp9YQ", "number": 1415, "cdate": 1538087975374, "ddate": null, "tcdate": 1538087975374, "tmdate": 1683306264198, "tddate": null, "forum": "rJeyV2AcKX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Network 
Reparameterization for Unseen Class Categorization", "abstract": "Many problems with large-scale labeled training data have been impressively solved by deep learning. However, Unseen Class Categorization (UCC) with minimal information provided about target classes is the most commonly encountered setting in industry, which remains a challenging research problem in machine learning. Previous approaches to UCC either fail to generate a powerful discriminative feature extractor or fail to learn a flexible classifier that can be easily adapted to unseen classes. In this paper, we propose to address these issues through network reparameterization, \\textit{i.e.}, reparametrizing the learnable weights of a network as a function of other variables, by which we decouple the feature extraction part and the classification part of a deep classification model to suit the special setting of UCC, securing both strong discriminability and excellent adaptability. Extensive experiments for UCC on several widely-used benchmark datasets in the settings of zero-shot and few-shot learning demonstrate that, our method with network reparameterization achieves state-of-the-art performance.", "keywords": ["Unseen class categorization", "network reparameterization", "few-shot learning", "zero-shot learning"], "authorids": ["li.kai.gml@gmail.com", "renqiang@nec-labs.com", "bbai@nec-labs.com", "yunfu@ece.neu.edu", "hpg@nec-labs.com"], "authors": ["Kai Li", "Martin Renqiang Min", "Bing Bai", "Yun Fu", "Hans Peter Graf"], "TL;DR": "A unified frame for both few-shot learning and zero-shot learning based on network reparameterization", "pdf": "/pdf/eb98361351aba2ddc9c40f83e35017f38d44991e.pdf", "paperhash": "li|network_reparameterization_for_unseen_class_categorization"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087975399, "details": {"replyCount": 13, "invitation": {"id": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HyxyV209Y7", "original": "SJgqv7R9tm", "number": 1414, "cdate": 1538087975202, "ddate": null, "tcdate": 1538087975202, "tmdate": 1683306264082, "tddate": null, "forum": "HyxyV209Y7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Data Poisoning Attack against Unsupervised Node Embedding Methods", "abstract": "Unsupervised node embedding methods (e.g., DeepWalk, LINE, and node2vec) have attracted growing interests given their simplicity and effectiveness. However, although these methods have been proved effective in a variety of applications, none of the existing work has analyzed the robustness of them. This could be very risky if these methods are attacked by an adversarial party. In this paper, we take the task of link prediction as an example, which is one of the most fundamental problems for graph analysis, and introduce a data poisoning\nattack to node embedding methods. 
We give a complete characterization of attacker's utilities and present efficient solutions to adversarial attacks for two popular node embedding methods: DeepWalk and LINE. We evaluate our proposed attack model on multiple real-world graphs. Experimental results show that our proposed model can significantly affect the results of link prediction by slightly changing the graph structures (e.g., adding or removing a few edges). We also show that our proposed model is very general and can be transferable across different embedding methods. \nFinally, we conduct a case study on a coauthor network to better understand our attack method.", "paperhash": "sun|data_poisoning_attack_against_unsupervised_node_embedding_methods", "authorids": ["sunmj15@gmail.com", "tangjianpku@gmail.com", "huichen3@illinois.edu", "lxbosky@gmail.com", "xiaocw@umich.edu", "antoniechen@tencent.com", "dawnsong@gmail.com"], "authors": ["Mingjie Sun", "Jian Tang", "Huichen Li", "Bo Li", "Chaowei Xiao", "Yao Chen", "Dawn Song"], "keywords": [], "pdf": "/pdf/a725791a2d46a9c1ba863fde75156f32fb1f6572.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087975227, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": 
["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJz6QhR9YQ", "original": "BJe9Eipctm", "number": 1401, "cdate": 1538087972974, "ddate": null, "tcdate": 1538087972974, "tmdate": 1683306263993, "tddate": null, "forum": "HJz6QhR9YQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Hierarchical Deep Reinforcement Learning Agent with Counter Self-play on Competitive Games ", "abstract": "Deep Reinforcement Learning algorithms lead to agents that can solve difficult decision making problems in complex environments. However, many difficult multi-agent competitive games, especially real-time strategy games are still considered beyond the capability of current deep reinforcement learning algorithms, although there has been a recent effort to change this \\citep{openai_2017_dota, vinyals_2017_starcraft}. Moreover, when the opponents in a competitive game are suboptimal, the current \\textit{Nash Equilibrium} seeking, self-play algorithms are often unable to generalize their strategies to opponents that play strategies vastly different from their own. This suggests that a learning algorithm that is beyond conventional self-play is necessary. We develop Hierarchical Agent with Self-play (HASP), a learning approach for obtaining hierarchically structured policies that can achieve higher performance than conventional self-play on competitive games through the use of a diverse pool of sub-policies we get from Counter Self-Play (CSP). We demonstrate that the ensemble policy generated by HASP can achieve better performance while facing unseen opponents that use sub-optimal policies. 
On a motivating iterated Rock-Paper-Scissor game and a partially observable real-time strategic game (http://generals.io/), we are led to the conclusion that HASP can perform better than conventional self-play as well as achieve 77% win rate against FloBot, an open-source agent which has ranked at position number 2 on the online leaderboards.", "keywords": ["deep reinforcement learning", "self-play", "real-time strategic game", "multi-agent"], "authorids": ["huazhe_xu@berkeley.edu", "keirp@berkeley.edu", "cqb@tsinghua.edu.cn", "hrtang@math.berkeley.edu", "pabbeel@cs.berkeley.edu", "trevor@eecs.berkeley.edu", "svlevine@eecs.berkeley.edu"], "authors": ["Huazhe Xu", "Keiran Paster", "Qibin Chen", "Haoran Tang", "Pieter Abbeel", "Trevor Darrell", "Sergey Levine"], "TL;DR": "We develop Hierarchical Agent with Self-play (HASP), a learning approach for obtaining hierarchically structured policies that can achieve high performance than conventional self-play on competitive real-time strategic games.", "pdf": "/pdf/fe7c68a56facc76cf105f3d936721c20a65d4c6c.pdf", "paperhash": "xu|hierarchical_deep_reinforcement_learning_agent_with_counter_selfplay_on_competitive_games"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087973001, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": 
["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1g6XnCcKQ", "original": "HklclZ3qFX", "number": 1400, "cdate": 1538087972806, "ddate": null, "tcdate": 1538087972806, "tmdate": 1683306263893, "tddate": null, "forum": "B1g6XnCcKQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Object-Contrastive Networks: Unsupervised Object Representations", "abstract": "Discovering objects and their attributes is of great importance for autonomous agents to effectively operate in human environments. This task is particularly challenging due to the ubiquitousness of objects and all their nuances in perceptual and semantic detail. In this paper we present an unsupervised approach for learning disentangled representations of objects entirely from unlabeled monocular videos. These continuous representations are not biased by or limited by a discrete set of labels determined by human labelers. The proposed representation is trained with a metric learning loss, where objects with homogeneous features are pushed together, while those with heterogeneous features are pulled apart. We show these unsupervised embeddings allow to discover object attributes and can enable robots to self-supervise in previously unseen environments. We quantitatively evaluate performance on a large-scale synthetic dataset with 12k object models, as well as on a real dataset collected by a robot and show that our unsupervised object understanding generalizes to previously unseen objects. Specifically, we demonstrate the effectiveness of our approach on robotic manipulation tasks, such as pointing at and grasping of objects. 
An interesting and perhaps surprising finding in this approach is that given a limited set of objects, object correspondences will naturally emerge when using metric learning without requiring explicit positive pairs.", "keywords": ["self-supervised robotics", "object understanding", "object representations", "metric learning", "unsupervised vision"], "authorids": ["pirk@google.com", "khansari@google.com", "yunfeibai@google.com", "coreylynch@google.com", "sermanet@google.com"], "authors": ["Soeren Pirk", "Mohi Khansari", "Yunfei Bai", "Corey Lynch", "Pierre Sermanet"], "TL;DR": "An unsupervised approach for learning disentangled representations of objects entirely from unlabeled monocular videos.", "pdf": "/pdf/de707f8d8cde22b31f14616b7200cb0b62d80870.pdf", "paperhash": "pirk|objectcontrastive_networks_unsupervised_object_representations"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087972829, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJepX2A9tX", 
"original": "r1l1Cup5KQ", "number": 1399, "cdate": 1538087972638, "ddate": null, "tcdate": 1538087972638, "tmdate": 1683306263804, "tddate": null, "forum": "BJepX2A9tX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Rotation Equivariant Networks via Conic Convolution and the DFT", "abstract": "Performance of neural networks can be significantly improved by encoding known invariance for particular tasks. Many image classification tasks, such as those related to cellular imaging, exhibit invariance to rotation. In particular, to aid convolutional neural networks in learning rotation invariance, we consider a simple, efficient conic convolutional scheme that encodes rotational equivariance, along with a method for integrating the magnitude response of the 2D-discrete-Fourier transform (2D-DFT) to encode global rotational invariance. We call our new method the Conic Convolution and DFT Network (CFNet). We evaluated the efficacy of CFNet as compared to a standard CNN and group-equivariant CNN (G-CNN) for several different image classification tasks and demonstrated improved performance, including classification accuracy, computational efficiency, and its robustness to hyperparameter selection. Taken together, we believe CFNet represents a new scheme that has the potential to improve many imaging analysis applications.", "keywords": ["deep learning", "rotation equivariance", "bioimaging analysis"], "authorids": ["bchidest@andrew.cmu.edu", "minhdo@illinois.edu", "jianma@cs.cmu.edu"], "authors": ["Benjamin Chidester", "Minh N. 
Do", "Jian Ma"], "TL;DR": "We propose conic convolution and the 2D-DFT to encode rotation equivariance into an neural network.", "pdf": "/pdf/212de963eee4c532717aafdd0d15df3a2050582f.pdf", "paperhash": "chidester|rotation_equivariant_networks_via_conic_convolution_and_the_dft"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087972663, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "Sylw7nCqFQ", "original": "Syx_JFvoOm", "number": 1367, "cdate": 1538087966981, "ddate": null, "tcdate": 1538087966981, "tmdate": 1683306263699, "tddate": null, "forum": "Sylw7nCqFQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "IMAGE DEFORMATION META-NETWORK FOR ONE-SHOT LEARNING", "abstract": "Humans can robustly learn novel visual concepts even when images undergo various deformations and loose certain information. 
Incorporating this ability to synthesize deformed instances of new concepts might help visual recognition systems perform better one-shot learning, i.e., learning concepts from one or few examples. Our key insight is that, while the deformed images might not be visually realistic, they still maintain critical semantic information and contribute significantly in formulating classifier decision boundaries. Inspired by the recent progress on meta-learning, we combine a meta-learner with an image deformation network that produces additional training examples, and optimize both models in an endto- end manner. The deformation network learns to synthesize images by fusing a pair of images\u2014a probe image that keeps the visual content and a gallery image that diversifies the deformations. We demonstrate results on the widely used oneshot learning benchmarks (miniImageNet and ImageNet 1K challenge datasets), which significantly outperform the previous state-of-the-art approaches.", "keywords": [], "authorids": ["tankche2@gmail.com", "yanweifu@fudan.edu.cn", "yuxiongw@cs.cmu.edu", "forest.linma@gmail.com", "wl2223@columbia.edu", "hebert@ri.cmu.edu"], "authors": ["Zitian Chen", "Yanwei Fu", "Yu-Xiong Wang", "Lin Ma", "Wei Liu", "Martial Hebert"], "pdf": "/pdf/ed2810a0ec8c3f74e4f1404d164ca5c6bdfb07c6.pdf", "paperhash": "chen|image_deformation_metanetwork_for_oneshot_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087967005, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed 
with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJxUX2C9Ym", "original": "rylEgRTcFQ", "number": 1362, "cdate": 1538087966126, "ddate": null, "tcdate": 1538087966126, "tmdate": 1683306263622, "tddate": null, "forum": "HJxUX2C9Ym", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Iterative Binary Decisions", "abstract": "The complexity of functions a neural network approximates make it hard to explain what the classification decision is based on. In this work, we present a framework that exposes more information about this decision-making process. Instead of producing a classification in a single step, our model iteratively makes binary sub-decisions which, when combined as a whole, ultimately produces the same classification result while revealing a decision tree as thought process. While there is generally a trade-off between interpretability and accuracy, the insights our model generates come at a negligible loss in accuracy. 
The decision tree resulting from the sequence of binary decisions of our model reveal a hierarchical clustering of the data and can be used as learned attributes in zero-shot learning.", "keywords": ["explainable AI", "interpretability", "deep learning", "decision tree", "zero-shot learning"], "authorids": ["s.alaniz@uva.nl", "z.akata@uva.nl"], "authors": ["Stephan Alaniz", "Zeynep Akata"], "pdf": "/pdf/a66e6bbdcf336d21c6d37985525568e0e3a48402.pdf", "paperhash": "alaniz|iterative_binary_decisions"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087966149, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJlSQnR5t7", "original": "HJekaeTqKQ", "number": 1358, "cdate": 1538087965454, "ddate": null, "tcdate": 1538087965454, "tmdate": 1683306263568, "tddate": null, "forum": "BJlSQnR5t7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Deepstr\u00f6m Networks", "abstract": "Recent work has focused on combining kernel 
methods and deep learning. With this in mind, we introduce Deepstr\u00f6m networks -- a new architecture of neural networks which we use to replace top dense layers of standard convolutional architectures with an approximation of a kernel function by relying on the Nystr\u00f6m approximation. \nOur approach is easy highly flexible. It is compatible with any kernel function and it allows exploiting multiple kernels. \nWe show that Deepstr\u00f6m networks reach state-of-the-art performance on standard datasets like SVHN and CIFAR100. One benefit of the method lies in its limited number of learnable parameters which make it particularly suited for small training set sizes, e.g. from 5 to 20 samples per class. Finally we illustrate two ways of using multiple kernels, including a multiple Deepstr\u00f6m setting, that exploits a kernel on each feature map output by the convolutional part of the model. ", "keywords": ["kernels", "Nystr\u00f6m approximation", "deep convnets"], "authorids": ["luc.giffon@lis-lab.fr", "hachem.kadri@lis-lab.fr", "stephane.ayache@lis-lab.fr", "thierry.artieres@lis-lab.fr"], "authors": ["Luc Giffon", "Hachem Kadri", "St\u00e9phane Ayache", "Thierry Arti\u00e8res"], "TL;DR": "A new neural architecture where top dense layers of standard convolutional architectures are replaced with an approximation of a kernel function by relying on the Nystr\u00f6m approximation.", "pdf": "/pdf/58aa26133d794a545700320dfeed0cfbda47e6ea.pdf", "paperhash": "giffon|deepstr\u00f6m_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087965480, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed 
to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HkNN7nR5Ym", "original": "HyxujC65tQ", "number": 1351, "cdate": 1538087964286, "ddate": null, "tcdate": 1538087964286, "tmdate": 1683306263440, "tddate": null, "forum": "HkNN7nR5Ym", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Associate Normalization", "abstract": "Normalization is a key technique for training deep neural networks. It improves the stability of the training process and thus makes the networks easier to train. However, in typical normalization methods, the rescaling parameters that control the mean and variance of the output do not associate with any input information during the forward phase. Therefore, inputs of different types are treated as from the exact same distribution, which may limit the feature expressiveness of normalization module. We present Associate Normalization (AssocNorm) to overcome the above limitation. AssocNorm extracts the key information from input features and connects them with rescaling parameters by an auto-encoder-like neural network in the normalization module. Furthermore, AssocNorm normalizes the features of each example individually, so the accuracy is relatively stable for different batch sizes. 
The experimental results show that AssocNorm achieves better performance than Batch Normalization on several benchmark datasets under various hyper-parameter settings.", "keywords": [], "authorids": ["gasoonjia@icloud.com", "djchen.tw@gmail.com", "htchen@cs.nthu.edu.tw"], "authors": ["Song-Hao Jia", "Ding-Jie Chen", "Hwann-Tzong Chen"], "pdf": "/pdf/dafadd0d99988dd952ac6d854e9bf8a3c2e6ba4e.pdf", "paperhash": "jia|associate_normalization"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087964310, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SyG1QnRqF7", "original": "ByxPS-CqFm", "number": 1321, "cdate": 1538087959166, "ddate": null, "tcdate": 1538087959166, "tmdate": 1683306263304, "tddate": null, "forum": "SyG1QnRqF7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Towards Resisting Large Data Variations via Introspective Learning", "abstract": "Learning deep networks which can resist large variations between 
training andtesting data is essential to build accurate and robust image classifiers. Towardsthis end, a typical strategy is to apply data augmentation to enlarge the trainingset. However, standard data augmentation is essentially a brute-force strategywhich is inefficient, as it performs all the pre-defined transformations to everytraining sample. In this paper, we propose a principled approach to train networkswith significantly improved resistance to large variations between training andtesting data. This is achieved by embedding a learnable transformation moduleinto the introspective networks (Jin et al., 2017; Lazarow et al., 2017; Lee et al.,2018), which is a convolutional neural network (CNN) classifier empowered withgenerative capabilities. Our approach alternatively synthesizes pseudo-negativesamples with learned transformations and enhances the classifier by retraining itwith synthesized samples. Experimental results verify that our approach signif-icantly improves the ability of deep networks to resist large variations betweentraining and testing data and achieves classification accuracy improvements onseveral benchmark datasets, including MNIST, affNIST, SVHN and CIFAR-10.", "keywords": ["Introspective learning", "Large variations resistance", "Image classification", "Generative models"], "authorids": ["yzhao83@jhu.edu", "tytian@outlook.com", "shenwei1231@gmail.com", "alan.l.yuille@gmail.com"], "authors": ["Yunhan Zhao", "Ye Tian", "Wei Shen", "Alan Yuille"], "TL;DR": "We propose a principled approach that endows classifiers with the ability to resist larger variations between training and testing data in an intelligent and efficient manner.", "pdf": "/pdf/d4eb9f4a639aa5465cf1f78ce7f013fb572766ba.pdf", "paperhash": "zhao|towards_resisting_large_data_variations_via_introspective_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087959190, "details": 
{"replyCount": 11, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BkeAf2CqY7", "original": "SJlxGPLctX", "number": 1313, "cdate": 1538087957792, "ddate": null, "tcdate": 1538087957792, "tmdate": 1683306263257, "tddate": null, "forum": "BkeAf2CqY7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Efficient Federated Learning via Variational Dropout", "abstract": "As an emerging field, federated learning has recently attracted considerable attention.\nCompared to distributed learning in the datacenter setting, federated learning\nhas more strict constraints on computate efficiency of the learned model and communication\ncost during the training process. In this work, we propose an efficient\nfederated learning framework based on variational dropout. Our approach is able\nto jointly learn a sparse model while reducing the amount of gradients exchanged\nduring the iterative training process. 
We demonstrate the superior performance\nof our approach on achieving significant model compression and communication\nreduction ratios with no accuracy loss.", "keywords": ["federated learning", "communication efficient", "variational dropout", "sparse model"], "authorids": ["duwei1@msu.edu", "zengxia6@msu.edu", "myan@msu.edu", "mizhang@msu.edu"], "authors": ["Wei Du", "Xiao Zeng", "Ming Yan", "Mi Zhang"], "TL;DR": "a joint model and gradient sparsification method for federated learning", "pdf": "/pdf/c41dd33dfd4a894f2150995ab3092a9a031cd16f.pdf", "paperhash": "du|efficient_federated_learning_via_variational_dropout"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087957817, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJgAfh09tm", "original": "ByxD1WActX", "number": 1312, "cdate": 1538087957627, "ddate": null, "tcdate": 1538087957627, "tmdate": 1683306263140, "tddate": null, "forum": "BJgAfh09tm", "replyto": null, "invitation": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Bilingual-GAN: Neural Text Generation and Neural Machine Translation as Two Sides of the Same Coin", "abstract": "Latent space based GAN methods and attention based encoder-decoder architectures have achieved impressive results in text generation and Unsupervised NMT respectively. Leveraging the two domains, we propose an adversarial latent space based architecture capable of generating parallel sentences in two languages concurrently and translating bidirectionally. The bilingual generation goal is achieved by sampling from the latent space that is adversarially constrained to be shared between both languages. First an NMT model is trained, with back-translation and an adversarial setup, to enforce a latent state between the two languages. The encoder and decoder are shared for the two translation directions. Next, a GAN is trained to generate \u2018synthetic\u2019 code mimicking the languages\u2019 shared latent space. This code is then fed into the decoder to generate text in either language. We perform our experiments on Europarl and Multi30k datasets, on the English-French language pair, and document our performance using both Supervised and Unsupervised NMT.", "keywords": ["Text Generation", "Machine Translation", "Deep Learning", "GAN"], "authorids": ["ahmadrash@gmail.com", "alan.do-omri@mail.mcgill.ca", "mehdi.rezagholizadeh@gmail.com", "md.akmal.haidar@huawei.com", "haamed.sadeghi@gmail.com"], "authors": ["Ahmad Rashid", "Alan Do-Omri", "Mehdi Rezagholizadeh", "Md. 
Akmal Haidar", "Hamed Sadeghi"], "TL;DR": "We present a novel method for Bilingual Text Generation producing parallel concurrent sentences in two languages.", "pdf": "/pdf/0b8cbbd7c568ec67b6b6135d588e1f1a0154c853.pdf", "paperhash": "rashid|bilingualgan_neural_text_generation_and_neural_machine_translation_as_two_sides_of_the_same_coin"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087957651, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "S1g6zn09tm", "original": "H1xk8RpcYm", "number": 1310, "cdate": 1538087957286, "ddate": null, "tcdate": 1538087957286, "tmdate": 1683306263040, "tddate": null, "forum": "S1g6zn09tm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Latent Transformations for Object View Points Synthesis", "abstract": "We propose a fully-convolutional conditional generative model, the latent transformation neural network (LTNN), capable of view synthesis using a light-weight neural network 
suited for real-time applications. In contrast to existing conditional\ngenerative models which incorporate conditioning information via concatenation, we introduce a dedicated network component, the conditional transformation unit (CTU), designed to learn the latent space transformations corresponding to specified target views. In addition, a consistency loss term is defined to guide the network toward learning the desired latent space mappings, a task-divided decoder is constructed to refine the quality of generated views, and an adaptive discriminator is introduced to improve the adversarial training process. The generality of the proposed methodology is demonstrated on a collection of three diverse tasks: multi-view reconstruction on real hand depth images, view synthesis of real and synthetic faces, and the rotation of rigid objects. The proposed model is shown to exceed state-of-the-art results in each category while simultaneously achieving a reduction in the computational demand required for inference by 30% on average.", "keywords": ["conditional generative model", "deep learning", "fully-convolutional network", "image attribute modification", "multi-view reconstruction", "view sythesis"], "authorids": ["kim2030@purdue.edu", "nwinovic@purdue.edu", "chi45@purdue.edu", "guanglin@purdue.edu", "ramani@purdue.edu"], "authors": ["Sangpil Kim", "Nick Winovich", "Hyung-gun Chi", "Guang Lin", "Karthik Ramani"], "TL;DR": "We introduce an effective, general framework for incorporating conditioning information into inference-based generative models.", "pdf": "/pdf/45c5ec09abca07f62f420d008909155fbc5562e2.pdf", "paperhash": "kim|latent_transformations_for_object_view_points_synthesis"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087957312, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, 
"expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJlif3C5FQ", "original": "Syg1k6J5KX", "number": 1294, "cdate": 1538087954564, "ddate": null, "tcdate": 1538087954564, "tmdate": 1750551533086, "tddate": null, "forum": "BJlif3C5FQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Learning to Attend On Essential Terms: An Enhanced Retriever-Reader Model for Open-domain Question Answering", "abstract": "Open-domain question answering remains a challenging task as it requires models that are capable of understanding questions and answers, collecting useful information, and reasoning over evidence. Previous work typically formulates this task as a reading comprehension or entailment problem given evidence retrieved from search engines. However, existing techniques struggle to retrieve indirectly related evidence when no directly related evidence is provided, especially for complex questions where it is hard to parse precisely what the question asks. In this paper we propose a retriever-reader model that learns to attend on essential terms during the question answering process. 
We build (1) an essential term selector which first identifies the most important words in a question, then reformulates the query and searches for related evidence; and (2) an enhanced reader that distinguishes between essential terms and distracting words to predict the answer. We evaluate our model on multiple open-domain QA datasets where it outperforms the existing state-of-the-art, notably leading to an improvement of 8.1% on the AI2 Reasoning Challenge (ARC) dataset.", "keywords": ["Open-domain question answering"], "authorids": ["jin018@ucsd.edu", "chezhu@microsoft.com", "wzchen@microsoft.com", "jmcauley@cs.ucsd.edu"], "authors": ["Jianmo Ni", "Chenguang Zhu", "Weizhu Chen", "Julian McAuley"], "pdf": "/pdf/dee21fa5763055629b0e8bfa7b9123e9eeac0a37.pdf", "paperhash": "ni|learning_to_attend_on_essential_terms_an_enhanced_retrieverreader_model_for_opendomain_question_answering", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/learning-to-attend-on-essential-terms-an/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087954587, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], 
"noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1g5Gh05KQ", "original": "BJeuxc6cKX", "number": 1292, "cdate": 1538087954218, "ddate": null, "tcdate": 1538087954218, "tmdate": 1683306262828, "tddate": null, "forum": "r1g5Gh05KQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Advanced Neuroevolution: A gradient-free algorithm to train Deep Neural Networks", "abstract": "In this paper we present a novel optimization algorithm called Advanced Neuroevolution. The aim for this algorithm is to train deep neural networks, and eventually act as an alternative to Stochastic Gradient Descent (SGD) and its variants as needed.We evaluated our algorithm on the MNIST dataset, as well as on several global optimization problems such as the Ackley function. We find the algorithm performing relatively well for both cases, overtaking other global optimization algorithms such as Particle Swarm Optimization (PSO) and Evolution Strategies (ES).\n", "keywords": ["Evolutionary Algorithm", "Optimization", "MNIST"], "authorids": ["aaa2cn@virginia.edu", "dweikersdorfer@nvidia.com", "cdelaunay@nvidia.com"], "authors": ["Ahmed Aly", "David Weikersdorfer", "Claire Delaunay"], "TL;DR": "A new algorithm to train deep neural networks. 
Tested on optimization functions and MNIST.", "pdf": "/pdf/2ae00ab0d742f5c97e9625ad257b7fcac027ad0b.pdf", "paperhash": "aly|advanced_neuroevolution_a_gradientfree_algorithm_to_train_deep_neural_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087954242, "details": {"replyCount": 17, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1lcM3AcKm", "original": "BkxHz6ncKm", "number": 1291, "cdate": 1538087954045, "ddate": null, "tcdate": 1538087954045, "tmdate": 1683306262779, "tddate": null, "forum": "r1lcM3AcKm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "RNNs with Private and Shared Representations for Semi-Supervised Sequence Learning", "abstract": "Training recurrent neural networks (RNNs) on long sequences using backpropagation through time (BPTT) remains a fundamental challenge. 
\nIt has been shown that adding a local unsupervised loss term into the optimization objective makes the training of RNNs on long sequences more effective. \nWhile the importance of an unsupervised task can in principle be controlled by a coefficient in the objective function, the gradients with respect to the unsupervised loss term still influence all the hidden state dimensions, which might cause important information about the supervised task to be degraded or erased. \nCompared to existing semi-supervised sequence learning methods, this paper focuses upon a traditionally overlooked mechanism -- an architecture with explicitly designed private and shared hidden units designed to mitigate the detrimental influence of the auxiliary unsupervised loss over the main supervised task.\nWe achieve this by dividing RNN hidden space into a private space for the supervised task and a shared space for both the supervised and unsupervised tasks. We present extensive experiments with the proposed framework on several long sequence modeling benchmark datasets. Results indicate that the proposed framework can yield performance gains in RNN models where long term dependencies are notoriously challenging to deal with. 
", "keywords": ["recurrent neural network", "semi-supervised learning"], "authorids": ["olga.xu@umontreal.ca", "jie.fu@polymtl.ca", "pfliu14@fudan.edu.cn", "zhi-hao.luo@polymtl.ca", "christopher.pal@polymtl.ca"], "authors": ["Ge Ya Luo", "Jie Fu", "Pengfei Liu", "Zhi Hao Luo", "Chris Pal"], "TL;DR": "This paper focuses upon a traditionally overlooked mechanism -- an architecture with explicitly designed private and shared hidden units designed to mitigate the detrimental influence of the auxiliary unsupervised loss over the main supervised task.", "pdf": "/pdf/b845dfbee6c7106841ddce3748ea671dc2ce68a3.pdf", "paperhash": "luo|rnns_with_private_and_shared_representations_for_semisupervised_sequence_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087954069, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJg_fnRqF7", "original": "H1llo5DFFX", "number": 1279, "cdate": 1538087951984, "ddate": null, "tcdate": 1538087951984, "tmdate": 1683306262627, "tddate": null, 
"forum": "BJg_fnRqF7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Deep clustering based on a mixture of autoencoders", "abstract": "In this paper we propose a Deep Autoencoder Mixture Clustering (DAMIC) algorithm. It is based on a mixture of deep autoencoders where each cluster is represented by an autoencoder. A clustering network transforms the data into another space and then selects one of the clusters. Next, the autoencoder associated with this cluster is used to reconstruct the data-point. The clustering algorithm jointly learns the nonlinear data representation and the set of autoencoders. The optimal clustering is found by minimizing the reconstruction loss of the mixture of autoencoder network. Unlike other deep clustering algorithms, no regularization term is needed to avoid data collapsing to a single point. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.", "keywords": ["deep clustering", "mixture of experts", "mixture of autoencoders"], "authorids": ["shlomi.chazan@biu.ac.il", "sharon.gannot@biu.ac.il", "jacob.goldberger@biu.ac.il"], "authors": ["Shlomo E. 
Chazan", "Sharon Gannot", "Jacob Goldberger"], "TL;DR": "We propose a deep clustering method where instead of a centroid each cluster is represented by an autoencoder", "pdf": "/pdf/a71fe2eb4a08b69562465509f78c87f8a844cf53.pdf", "paperhash": "chazan|deep_clustering_based_on_a_mixture_of_autoencoders"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087952008, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "ByWMz305FQ", "original": "BklBHz09FQ", "number": 1246, "cdate": 1538087946320, "ddate": null, "tcdate": 1538087946320, "tmdate": 1683306262535, "tddate": null, "forum": "ByWMz305FQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "The Missing Ingredient in Zero-Shot Neural Machine Translation", "abstract": "Multilingual Neural Machine Translation (NMT) systems are capable of translating between multiple source and target languages within a single system. 
An important indicator of generalization within these systems is the quality of zero-shot translation - translating between language pairs that the system has never seen during training. However, until now, the zero-shot performance of multilingual models has lagged far behind the quality that can be achieved by using a two step translation process that pivots through an intermediate language (usually English). In this work, we diagnose why multilingual models under-perform in zero shot settings. We propose explicit language invariance losses that guide an NMT encoder towards learning language agnostic representations. Our proposed strategies significantly improve zero-shot translation performance on WMT English-French-German and on the IWSLT 2017 shared task, and for the first time, match the performance of pivoting approaches while maintaining performance on supervised directions.", "keywords": ["Machine Translation", "Multi-lingual processing", "Zero-Shot translation"], "authorids": ["naveenariva@gmail.com", "ankurbpn@google.com", "orhanf@google.com", "roee.aharoni@gmail.com", "melvinp@google.com", "wmach@google.com"], "authors": ["Naveen Arivazhagan", "Ankur Bapna", "Orhan Firat", "Roee Aharoni", "Melvin Johnson", "Wolfgang Macherey"], "TL;DR": "Simple similarity constraints on top of multilingual NMT enables high quality translation between unseen language pairs for the first time.", "pdf": "/pdf/225e5c63da4bdd08cc6bf77cdd1c7de0b4d3e32e.pdf", "paperhash": "arivazhagan|the_missing_ingredient_in_zeroshot_neural_machine_translation"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087946343, "details": {"replyCount": 10, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, 
"readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJlfzhA9Y7", "original": "HJxAC-C5tX", "number": 1243, "cdate": 1538087945790, "ddate": null, "tcdate": 1538087945790, "tmdate": 1683306262482, "tddate": null, "forum": "rJlfzhA9Y7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Distributed Deep Policy Gradient for Competitive Adversarial Environment", "abstract": "This work considers the problem of cooperative learners in partially observable, stochastic environment, receiving feedback in the form of joint reward. The paper presents a flexible multi-agent competitive environment for online training and direct policy performance comparison. This forms a formal problem of a multi-agent Reinforcement Learning (RL) under partial observability, where the goal is to maximize the score performance measured in a direct confrontation. 
To address the complexity of the problem we propose a distributed deep stochastic policy gradient with individual observations, experience replay, policy transfer, and self-play.", "keywords": ["multi-agent", "partially observable", "reinforcement learning", "deepRL", "self play", "competitive environment"], "authorids": ["deniso2@illinois.edu", "girishc@illinois.edu"], "authors": ["Denis Osipychev", "Girish Chowdhary"], "pdf": "/pdf/73466fd3e435fcb744ab600396b1afc1b4653374.pdf", "paperhash": "osipychev|distributed_deep_policy_gradient_for_competitive_adversarial_environment"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087945816, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "S1VeG309Fm", "original": "rkgxNzCcK7", "number": 1235, "cdate": 1538087944354, "ddate": null, "tcdate": 1538087944354, "tmdate": 1683306262404, "tddate": null, "forum": "S1VeG309Fm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": 
"Teaching Machine How to Think by Natural Language: A study on Machine Reading Comprehension", "abstract": "Deep learning ends up as a black box, in which how it makes the decision cannot be directly understood by humans, let alone guide the reasoning process of deep network. In this work, we seek the possibility to guide the learning of network in reading comprehension task by natural language. Two approaches are proposed. In the first approach, the latent representation in the neural network is deciphered into text by a decoder; in the second approach, deep network uses text as latent representation. Human tutor provides ground truth for the output of the decoder or latent representation represented by text. On the bAbI QA tasks, we found that with the guidance on a few examples, the model can achieve the same performance with remarkably less training examples.", "keywords": ["Machine Reading Comprehension"], "authorids": ["ynnekuw@gmail.com", "chaoi111.t@gmail.com"], "authors": ["Tsung Han Wu", "Hung-yi Lee", "Yu Tsao", "ChaoI", "Tuan"], "pdf": "/pdf/cf7f29b037e500917b49650ad1a51ec4b09313af.pdf", "paperhash": "wu|teaching_machine_how_to_think_by_natural_language_a_study_on_machine_reading_comprehension"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087944377, "details": {"replyCount": 1, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": 
["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SygeznA9YX", "original": "S1ls-2a9KX", "number": 1231, "cdate": 1538087943663, "ddate": null, "tcdate": 1538087943663, "tmdate": 1683306262300, "tddate": null, "forum": "SygeznA9YX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Data Interpretation and Reasoning Over Scientific Plots", "abstract": "Data Interpretation is an important part of Quantitative Aptitude exams and requires an individual to answer questions grounded in plots such as bar charts, line graphs, scatter plots, \\textit{etc}. Recently, there has been an increasing interest in building models which can perform this task by learning from datasets containing triplets of the form \\{plot, question, answer\\}. Two such datasets have been proposed in the recent past which contain plots generated from synthetic data with limited (i) $x-y$ axes variables (ii) question templates and (iii) answer vocabulary and hence do not adequately capture the challenges posed by this task. To overcome these limitations of existing datasets, we introduce a new dataset containing $9.7$ million question-answer pairs grounded over $270,000$ plots with three main differentiators. First, the plots in our dataset contain a wide variety of realistic $x$-$y$ variables such as CO2 emission, fertility rate, \\textit{etc.} extracted from real word data sources such as World Bank, government sites, \\textit{etc}. Second, the questions in our dataset are more complex as they are based on templates extracted from interesting questions asked by a crowd of workers using a fraction of these plots. 
Lastly, the answers in our dataset are not restricted to a small vocabulary and a large fraction of the answers seen at test time are not present in the training vocabulary. As a result, existing models for Visual Question Answering which largely use end-to-end models in a multi-class classification framework cannot be used for this task. We establish initial results on this dataset and emphasize the complexity of the task using a multi-staged modular pipeline with various sub-components to (i) extract relevant data from the plot and convert it to a semi-structured table (ii) combine the question with this table and use compositional semantic parsing to arrive at a logical form from which the answer can be derived. We believe that such a modular framework is the best way to go forward as it would enable the research community to independently make progress on all the sub-tasks involved in plot question answering.", "keywords": ["VQA", "Data Interpretation", "Parsing", "Object Detection"], "authorids": ["prithag@cse.iitm.ac.in", "nmethani@cse.iitm.ac.in"], "authors": ["Pritha Ganguly", "Nitesh Methani", "Mitesh M. 
Khapra"], "TL;DR": "We created a new dataset for data interpretation over plots and also propose a baseline for the same.", "pdf": "/pdf/f7650966e80b2718ccdad847dc1649795f4dc44c.pdf", "paperhash": "ganguly|data_interpretation_and_reasoning_over_scientific_plots"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087943689, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJxA-h05KQ", "original": "BJe7XG05Ym", "number": 1224, "cdate": 1538087942480, "ddate": null, "tcdate": 1538087942480, "tmdate": 1750551533273, "tddate": null, "forum": "rJxA-h05KQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Inhibited Softmax for Uncertainty Estimation in Neural Networks", "abstract": "We present a new method for uncertainty estimation and out-of-distribution detection in neural networks with softmax output. We extend softmax layer with an additional constant input. 
The corresponding additional output is able to represent the uncertainty of the network. The proposed method requires neither additional parameters nor multiple forward passes nor input preprocessing nor out-of-distribution datasets. We show that our method performs comparably to more computationally expensive methods and outperforms baselines on our experiments from image recognition and sentiment analysis domains.", "keywords": ["uncertainty estimation", "out-of-distribution detection", "inhibited softmax"], "authorids": ["marcin@sigmoidal.io", "msusik@sigmoidal.io", "rafal@sigmoidal.io"], "authors": ["Marcin Mo\u017cejko", "Mateusz Susik", "Rafa\u0142 Karczewski"], "TL;DR": "Uncertainty estimation in a single forward pass without additional learnable parameters.", "pdf": "/pdf/72a13c7c4514a7933774cdb8c012b6fc07f61873.pdf", "paperhash": "moejko|inhibited_softmax_for_uncertainty_estimation_in_neural_networks", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/inhibited-softmax-for-uncertainty-estimation/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087942503, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": 
["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "S1lCbhAqKX", "original": "B1xB2aaqFQ", "number": 1219, "cdate": 1538087941619, "ddate": null, "tcdate": 1538087941619, "tmdate": 1750551533327, "tddate": null, "forum": "S1lCbhAqKX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Structured Content Preservation for Unsupervised Text Style Transfer", "abstract": "Text style transfer aims to modify the style of a sentence while keeping its content unchanged. Recent style transfer systems often fail to faithfully preserve the content after changing the style. This paper proposes a structured content preserving model that leverages linguistic information in the structured fine-grained supervisions to better preserve the style-independent content \\footnote{Henceforth, we refer to style-independent content as content, for simplicity.} during style transfer. In particular, we achieve the goal by devising rich model objectives based on both the sentence's lexical information and a language model that conditions on content. The resulting model therefore is encouraged to retain the semantic meaning of the target sentences. We perform extensive experiments that compare our model to other existing approaches in the tasks of sentiment and political slant transfer. 
Our model achieves significant improvement in terms of both content preservation and style transfer in automatic and human evaluation.", "keywords": ["Unsupervised text style transfer"], "authorids": ["yztian@ucdavis.edu", "zhitingh@cs.cmu.edu", "joyu@ucdavis.edu"], "authors": ["Youzhi Tian", "Zhiting Hu", "Zhou Yu"], "pdf": "/pdf/c520ef8b60693cf7f2edc44f109207a3aaa10537.pdf", "paperhash": "tian|structured_content_preservation_for_unsupervised_text_style_transfer", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/structured-content-preservation-for/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087941645, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1liWh09F7", "original": "B1lKC-j9KX", "number": 1204, "cdate": 1538087939058, "ddate": null, "tcdate": 1538087939058, "tmdate": 1750551533472, "tddate": null, "forum": "B1liWh09F7", "replyto": null, "invitation": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "SALSA-TEXT : SELF ATTENTIVE LATENT SPACE BASED ADVERSARIAL TEXT GENERATION", "abstract": "Inspired by the success of self attention mechanism and Transformer architecture\nin sequence transduction and image generation applications, we propose novel self\nattention-based architectures to improve the performance of adversarial latent code-\nbased schemes in text generation. Adversarial latent code-based text generation\nhas recently gained a lot of attention due to their promising results. In this paper,\nwe take a step to fortify the architectures used in these setups, specifically AAE\nand ARAE. We benchmark two latent code-based methods (AAE and ARAE)\ndesigned based on adversarial setups. In our experiments, the Google sentence\ncompression dataset is utilized to compare our method with these methods using\nvarious objective and subjective measures. The experiments demonstrate the\nproposed (self) attention-based models outperform the state-of-the-art in adversarial\ncode-based text generation.", "keywords": ["Self-attention", "Transformer", "generative adversarial networks", "GAN", "neural text generation", "NTG", "generative models"], "authorids": ["jgagnonmarchand@gmail.com", "haamed.sadeghi@gmail.com", "mehdi.rezagholizadeh@gmail.com", "md.akmal.haidar@huawei.com"], "authors": ["Jules Gagnon-Marchand", "Hamed Sadeghi", "Mehdi Rezagholizadeh", "Md. 
Akmal Haider"], "TL;DR": "We propose a self-attention based GAN architecture for unconditional text generation and improve on previous adversarial code-based results.", "pdf": "/pdf/6fa73c4491eb49700d676f0b93ffb13957ac9f6d.pdf", "paperhash": "gagnonmarchand|salsatext_self_attentive_latent_space_based_adversarial_text_generation", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/salsa-text-self-attentive-latent-space-based/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087939082, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "Byl9bhA5F7", "original": "H1e0lGA9Fm", "number": 1195, "cdate": 1538087937532, "ddate": null, "tcdate": 1538087937532, "tmdate": 1683306261924, "tddate": null, "forum": "Byl9bhA5F7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Found by NEMO: Unsupervised Object Detection from Negative Examples and 
Motion", "abstract": "This paper introduces NEMO, an approach to unsupervised object detection that uses motion---instead of image labels---as a cue to learn object detection. To discriminate between motion of the target object and other changes in the image, it relies on negative examples that show the scene without the object. The required data can be collected very easily by recording two short videos, a positive one showing the object in motion and a negative one showing the scene without the object. Without any additional form of pretraining or supervision and despite of occlusions, distractions, camera motion, and adverse lighting, those videos are sufficient to learn object detectors that can be applied to new videos and even generalize to unseen scenes and camera angles. In a baseline comparison, unsupervised object detection outperforms off-the shelf template matching and tracking approaches that are given an initial bounding box of the object. The learned object representations are also shown to be accurate enough to capture the relevant information from manipulation task demonstrations, which makes them applicable to learning from demonstration in robotics. 
An example of object detection that was learned from 3 minutes of video can be found here: http://y2u.be/u_jyz9_ETz4", "keywords": ["unsupervised learning", "computer vision", "object detection"], "authorids": ["rjon@google.com"], "authors": ["Rico Jonschkowski"], "TL;DR": "Learning to detect objects without image labels from 3 minutes of video", "pdf": "/pdf/dc309db3eb610e78d83e4f3603a0edc894269939.pdf", "paperhash": "jonschkowski|found_by_nemo_unsupervised_object_detection_from_negative_examples_and_motion"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087937559, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SyxdbnR9YQ", "original": "SJgq1z05KQ", "number": 1183, "cdate": 1538087935529, "ddate": null, "tcdate": 1538087935529, "tmdate": 1683306261925, "tddate": null, "forum": "SyxdbnR9YQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "HANDLING CONCEPT DRIFT IN WIFI-BASED INDOOR LOCALIZATION USING 
REPRESENTATION LEARNING", "abstract": "We outline the problem of concept drifts for time series data. In this work, we analyze the temporal inconsistency of streaming wireless signals in the context of device-free passive indoor localization. We show that data obtained from WiFi channel state information (CSI) can be used to train a robust system capable of performing room level localization. One of the most challenging issues for such a system is the movement of input data distribution to an unexplored space over time, which leads to an unwanted shift in the learned boundaries of the output space. In this work, we propose a phase and magnitude augmented feature space along with a standardization technique that is little affected by drifts. We show that this robust representation of the data yields better learning accuracy and requires less number of retraining. ", "keywords": ["concept drift", "wifi localization", "feature representation."], "authorids": ["raihan.seraj@mail.mcgill.ca", "negar.gh@aerial.ai", "michel.allegue@aerial.ai"], "authors": ["Raihan Seraj", "Negar Ghourchian", "Michel Allegue-Martinez"], "TL;DR": "We introduce an augmented robust feature space for streaming wifi data that is capable of tackling concept drift for indoor localization", "pdf": "/pdf/e2484cedf687793b77242d6d4f6545dc488d5741.pdf", "paperhash": "seraj|handling_concept_drift_in_wifibased_indoor_localization_using_representation_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087935561, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": 
{"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJzwb2RcK7", "original": "Bkgp0Ca9Km", "number": 1182, "cdate": 1538087935363, "ddate": null, "tcdate": 1538087935363, "tmdate": 1750551533707, "tddate": null, "forum": "SJzwb2RcK7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Adversarial Decomposition of Text Representation", "abstract": "In this paper, we present a method for adversarial decomposition of text representation. This method can be used to decompose a representation of an input sentence into several independent vectors, where each vector is responsible for a specific aspect of the input sentence. We evaluate the proposed method on two case studies: the conversion between different social registers and diachronic language change. We show that the proposed method is capable of fine-grained con- trolled change of these aspects of the input sentence. For example, our model is capable of learning a continuous (rather than categorical) representation of the style of the sentence, in line with the reality of language use. The model uses adversarial-motivational training and includes a special motivational loss, which acts opposite to the discriminator and encourages a better decomposition. 
Finally, we evaluate the obtained meaning embeddings on a downstream task of para- phrase detection and show that they are significantly better than embeddings of a regular autoencoder.", "keywords": ["learning representation", "decomposition", "adversarial training", "style transfer"], "authorids": ["jgc128@outlook.com", "arum@cs.uml.edu", "arogers@cs.uml.edu", "david_donahue@student.uml.edu"], "authors": ["Alexey Romanov", "Anna Rumshisky", "Anna Rogers", "David Donahue"], "TL;DR": "A method which learns separate representations for the meaning and the form of a sentence", "pdf": "/pdf/43f023160f6e01b5aa08d7a24336bfb8197abff4.pdf", "paperhash": "romanov|adversarial_decomposition_of_text_representation", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/adversarial-decomposition-of-text/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087935389, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} 
+{"id": "rygvZ2RcYm", "original": "Hklztd6ctQ", "number": 1179, "cdate": 1538087934850, "ddate": null, "tcdate": 1538087934850, "tmdate": 1683306261611, "tddate": null, "forum": "rygvZ2RcYm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Knowledge Representation for Reinforcement Learning using General Value Functions", "abstract": "Reinforcement learning (RL) is a very powerful approach for learning good control strategies from data. Value functions are a key concept for reinforcement learning, as they guide the search for good policies. A lot of effort has been devoted to designing and improving algorithms for learning value functions. In this paper, we argue that value functions are also a very natural way of providing a framework for knowledge representation for reinforcement learning agents. We show that generalized value functions provide a unifying lens for many algorithms, including policy gradient, successor features, option models and policies, and other forms of hierarchical reinforcement learning. 
We also demonstrate the potential of this representation to provide new, useful algorithms.", "keywords": ["Reinforcement Learning", "General Value Functions", "Policy Gradient", "Hierarchical Reinforcement Learning", "Successor Features"], "authorids": ["gcomanici@google.com", "doinap@google.com", "andrebarreto@google.com", "kenjitoyama@google.com", "eser@google.com", "hamelphi@google.com", "vezhnick@google.com", "shaobohou@google.com", "shibl@google.com"], "authors": ["Gheorghe Comanici", "Doina Precup", "Andre Barreto", "Daniel Kenji Toyama", "Eser Ayg\u00fcn", "Philippe Hamel", "Sasha Vezhnevets", "Shaobo Hou", "Shibl Mourad"], "pdf": "/pdf/9160fb57c502bf6b206e5a51cf96dc060cb4bdba.pdf", "paperhash": "comanici|knowledge_representation_for_reinforcement_learning_using_general_value_functions"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087934874, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1GHb2RqYX", "original": "BklmACpqt7", "number": 1170, "cdate": 
1538087933346, "ddate": null, "tcdate": 1538087933346, "tmdate": 1683306261577, "tddate": null, "forum": "B1GHb2RqYX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "PolyCNN: Learning Seed Convolutional Filters", "abstract": "In this work, we propose the polynomial convolutional neural network (PolyCNN), as a new design of a weight-learning efficient variant of the traditional CNN. The biggest advantage of the PolyCNN is that at each convolutional layer, only one convolutional filter is needed for learning the weights, which we call the seed filter, and all the other convolutional filters are the polynomial transformations of the seed filter, which is termed as an early fan-out. Alternatively, we can also perform late fan-out on the seed filter response to create the number of response maps needed to be input into the next layer. Both early and late fan-out allow the PolyCNN to learn only one convolutional filter at each layer, which can dramatically reduce the model complexity by saving 10x to 50x parameters during learning. While being efficient during both training and testing, the PolyCNN does not suffer performance due to the non-linear polynomial expansion which translates to richer representational power within the convolutional layers. By allowing direct control over model complexity, PolyCNN provides a flexible trade-off between performance and efficiency. We have verified the on-par performance between the proposed PolyCNN and the standard CNN on several visual datasets, such as MNIST, CIFAR-10, SVHN, and ImageNet.", "keywords": ["Efficient CNN", "Seed convolutional filter"], "authorids": ["juefei.xu@gmail.com", "vishnu@msu.edu", "msavvide@ri.cmu.edu"], "authors": ["Felix Juefei-Xu", "Vishnu Naresh Boddeti", "Marios Savvides"], "TL;DR": "PolyCNN only needs to learn one seed convolutional filter at each layer. 
This is an efficient variant of traditional CNN, with on-par performance.", "pdf": "/pdf/eab96709ff84ca55714b348bfcb4a9b1c82060b8.pdf", "paperhash": "juefeixu|polycnn_learning_seed_convolutional_filters"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087933371, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rylBZ305KQ", "original": "rJx8nCp9Y7", "number": 1167, "cdate": 1538087932829, "ddate": null, "tcdate": 1538087932829, "tmdate": 1683306261483, "tddate": null, "forum": "rylBZ305KQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Modeling Evolution of Language Through Time with Neural Networks", "abstract": "Language evolves over time with trends and shifts in technological, political, or cultural contexts. Capturing these variations is important to develop better language models. 
While recent works tackle temporal drifts by learning diachronic embeddings, we instead propose to integrate a temporal component into a recurrent language model. It takes the form of global latent variables, which are structured in time by a learned non-linear transition function. We perform experiments on three time annotated corpora. Experimental results on language modeling and classification tasks show that our model performs consistently better than temporal word embedding methods in two temporal evaluation settings: prediction and modeling. Moreover, we empirically show that the system is able to predict informative latent states in the future.", "keywords": ["language modeling", "variational inference", "dynamic model", "temporal data", "deep learning"], "authorids": ["edouard.delasalles@lip6.fr", "sylvain.lamprier@lip6.fr", "ludovic.denoyer@lip6.fr"], "authors": ["Edouard Delasalles", "Sylvain Lamprier", "Ludovic Denoyer"], "pdf": "/pdf/a895328d9f331bc00bcf1729f5d6378a85d0dc60.pdf", "paperhash": "delasalles|modeling_evolution_of_language_through_time_with_neural_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087932853, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": 
["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJeEWnR9F7", "original": "HygFmHfYtm", "number": 1164, "cdate": 1538087932329, "ddate": null, "tcdate": 1538087932329, "tmdate": 1750551533814, "tddate": null, "forum": "HJeEWnR9F7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Scaling up Deep Learning for PDE-based Models", "abstract": "Across numerous applications, forecasting relies on numerical solvers for partial differential equations (PDEs). Although the use of deep-learning techniques has been proposed, the uses have been restricted by the fact the training data are obtained using PDE solvers. Thereby, the uses were limited to domains, where the PDE solver was applicable, but no further. \n\nWe present methods for training on small domains, while applying the trained models on larger domains, with consistency constraints ensuring the solutions are physically meaningful even at the boundary of the small domains. 
We demonstrate the results on an air-pollution forecasting model for Dublin, Ireland.", "keywords": ["recurrent neural networks", "partial differential equation", "domain decomposition", "consistency constraints", "advection", "diffusion"], "authorids": ["haehnel@maths.tcd.ie", "julien.monteil@ie.ibm.com", "jakub.marecek@ie.ibm.com", "feardonn@ie.ibm.com"], "authors": ["Philipp Haehnel", "Julien Monteil", "Jakub Marecek", "Fearghal O'Donncha"], "TL;DR": "We present RNNs for training surrogate models of PDEs, wherein consistency constraints ensure the solutions are physically meaningful, even when the training uses much smaller domains than the trained model is applied to.", "pdf": "/pdf/8701bc2d7a61b4324c361822b5ec940be4b02706.pdf", "paperhash": "haehnel|scaling_up_deep_learning_for_pdebased_models", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/scaling-up-deep-learning-for-pde-based-models/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087932353, "details": {"replyCount": 1, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, 
"taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1gWWh05Y7", "original": "SyxAOwa9KQ", "number": 1145, "cdate": 1538087929185, "ddate": null, "tcdate": 1538087929185, "tmdate": 1683306261315, "tddate": null, "forum": "B1gWWh05Y7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Exploration in Policy Mirror Descent", "abstract": "Policy optimization is a core problem in reinforcement learning. In this paper, we investigate Reversed Entropy Policy Mirror Descent (REPMD), an on-line policy optimization strategy that improves exploration behavior while assuring monotonic progress in a principled objective. REPMD conducts a form of maximum entropy exploration within a mirror descent framework, but uses an alternative policy update with a reversed KL projection. This modified formulation bypasses undesirable mode seeking behavior and avoids premature convergence to sub-optimal policies, while still supporting strong theoretical properties such as guaranteed policy improvement. 
An experimental evaluation demonstrates that this approach significantly improves practical exploration and surpasses the empirical performance of state-of-the art policy optimization methods in a set of benchmark tasks.", "keywords": ["Reinforcement Learning", "Exploration", "Policy Optimization"], "authorids": ["jmei2@ualberta.ca", "chenjun@ualberta.ca", "ruitong.huang@borealisai.com", "daes@ualberta.ca", "mmueller@ualberta.ca"], "authors": ["Jincheng Mei", "Chenjun Xiao", "Ruitong Huang", "Dale Schuurmans", "Martin Muller"], "pdf": "/pdf/ec568121b7663f6ad015f33c0be2cea2562db6bc.pdf", "paperhash": "mei|exploration_in_policy_mirror_descent"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087929208, "details": {"replyCount": 1, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HkGb-3C5t7", "original": "r1lGgStcFm", "number": 1144, "cdate": 1538087929006, "ddate": null, "tcdate": 1538087929006, "tmdate": 1683306261165, "tddate": null, "forum": "HkGb-3C5t7", "replyto": null, "invitation": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "withdrawn", "abstract": "withdrawn", "keywords": [], "authorids": ["aaron.chadha.14@ucl.ac.uk", "i.andreopoulos@ucl.ac.uk"], "authors": ["withdrawn"], "TL;DR": " ", "pdf": "/pdf/1f2f95e82d24dfd0e65a9aa2ab91d8ecc8664a49.pdf", "paperhash": "withdrawn|withdrawn"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087929031, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1eWW2RqFX", "original": "r1gC5b09Fm", "number": 1142, "cdate": 1538087928663, "ddate": null, "tcdate": 1538087928663, "tmdate": 1750551533964, "tddate": null, "forum": "r1eWW2RqFX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "PointGrow: Autoregressively Learned Point Cloud Generation with Self-Attention", "abstract": "A point cloud is an agile 3D representation, efficiently modeling an object's surface geometry. 
However, these surface-centric properties also pose challenges on designing tools to recognize and synthesize point clouds. This work presents a novel autoregressive model, PointGrow, which generates realistic point cloud samples from scratch or conditioned from given semantic contexts. Our model operates recurrently, with each point sampled according to a conditional distribution given its previously-generated points. Since point cloud object shapes are typically encoded by long-range interpoint dependencies, we augment our model with dedicated self-attention modules to capture these relations. Extensive evaluation demonstrates that PointGrow achieves satisfying performance on both unconditional and conditional point cloud generation tasks, with respect to fidelity, diversity and semantic preservation. Further, conditional PointGrow learns a smooth manifold of given images where 3D shape interpolation and arithmetic calculation can be performed inside.", "keywords": ["point cloud generation", "autoregressive models", "self-attention"], "authorids": ["yb_sun@mit.edu", "yuewang@csail.mit.edu", "zwliu.hust@gmail.com", "j_siegel@mit.edu", "sesarma@mit.edu"], "authors": ["Yongbin Sun", "Yue Wang", "Ziwei Liu", "Joshua E. 
Siegel", "Sanjay Sarma"], "TL;DR": "An autoregressive deep learning model for generating diverse point clouds.", "pdf": "/pdf/462e50728f340bfda0a678c6c987050199c82f4e.pdf", "paperhash": "sun|pointgrow_autoregressively_learned_point_cloud_generation_with_selfattention", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/pointgrow-autoregressively-learned-point/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087928687, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rylRgh0qK7", "original": "rJgCTJA4FQ", "number": 1128, "cdate": 1538087926320, "ddate": null, "tcdate": 1538087926320, "tmdate": 1683306261024, "tddate": null, "forum": "rylRgh0qK7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "NA", "abstract": "NA", "keywords": ["NA"], "authorids": ["cqpcurry@gmail.com", "penny.ling.pan@gmail.com", "kenshinping@gmail.com"], "authors": 
["Qingpeng Cai", "Ling Pan", "Pingzhong Tang"], "pdf": "/pdf/49251da84fa69549ca7cda56cd5a06d978d48056.pdf", "paperhash": "cai|na"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087926343, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "H1Gnx2CqKQ", "original": "H1l3Oxn5F7", "number": 1113, "cdate": 1538087923848, "ddate": null, "tcdate": 1538087923848, "tmdate": 1683306261025, "tddate": null, "forum": "H1Gnx2CqKQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Hiding Objects from Detectors: Exploring Transferrable Adversarial Patterns", "abstract": "Adversaries in neural networks have drawn much attention since their first debut. \nWhile most existing methods aim at deceiving image classification models into misclassification or crafting attacks for specific object instances in the object setection tasks, we focus on creating universal adversaries to fool object detectors and hide objects from the detectors. 
\nThe adversaries we examine are universal in three ways: \n(1) They are not specific for specific object instances; \n(2) They are image-independent; \n(3) They can further transfer to different unknown models. \nTo achieve this, we propose two novel techniques to improve the transferability of the adversaries: \\textit{piling-up} and \\textit{monochromatization}. \nBoth techniques prove to simplify the patterns of generated adversaries, and ultimately result in higher transferability. ", "keywords": ["adversarial", "object detection"], "authorids": ["longlongsb@pku.edu.cn", "jie.fu@polymtl.ca", "christopher.pal@polymtl.ca"], "authors": ["Shangbang Long", "Jie Fu", "Chris Pal"], "TL;DR": "We focus on creating universal adversaries to fool object detectors and hide objects from the detectors. ", "pdf": "/pdf/149f561d0eefc9662e0400f8b3dbb345c6cab4e3.pdf", "paperhash": "long|hiding_objects_from_detectors_exploring_transferrable_adversarial_patterns"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087923872, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, 
"transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1ejxnCctX", "original": "Hkl9eOnctm", "number": 1107, "cdate": 1538087922854, "ddate": null, "tcdate": 1538087922854, "tmdate": 1750551534398, "tddate": null, "forum": "r1ejxnCctX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Representation Flow for Action Recognition", "abstract": "In this paper, we propose a convolutional layer inspired by optical flow algorithms to learn motion representations. Our representation flow layer is a fully-differentiable layer designed to capture the `flow' of any representation channel within a convolutional neural network for action recognition. Its parameters for iterative flow optimization are learned in an end-to-end fashion together with the other model parameters, maximizing the action recognition performance. Furthermore, we newly introduce the concept of learning `flow of flow' representations by stacking multiple representation flow layers. We conducted extensive experimental evaluations, confirming its advantages over previous recognition models using traditional optical flows in both computational speed and performance.\n", "keywords": [], "authorids": ["ajpiergi@indiana.edu", "mryoo@indiana.edu"], "authors": ["AJ Piergiovanni", "Michael S. 
Ryoo"], "pdf": "/pdf/31c7c9e1d53b6f3a57ba1f20b989e0220834e43e.pdf", "paperhash": "piergiovanni|representation_flow_for_action_recognition", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 5 code implementations](https://www.catalyzex.com/paper/representation-flow-for-action-recognition/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087922880, "details": {"replyCount": 16, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1xce3ActX", "original": "HJxztehcYm", "number": 1100, "cdate": 1538087921693, "ddate": null, "tcdate": 1538087921693, "tmdate": 1683306260851, "tddate": null, "forum": "r1xce3ActX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "N/A", "abstract": "N/A", "keywords": ["N/A"], "authorids": ["samitha.herath@data61.csiro.au", "u5505348@anu.edu.au", "mehrtash.harandi@monash.edu"], "authors": ["N/A"], "TL;DR": "N/A", "pdf": "/pdf/4bea1fed325d0018ffcc62d1fc3e8adb19ca28b6.pdf", "paperhash": "na|na"}, 
"signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087921718, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJgWl3A5YX", "original": "rkgSDoccKm", "number": 1047, "cdate": 1538087912673, "ddate": null, "tcdate": 1538087912673, "tmdate": 1683306260709, "tddate": null, "forum": "BJgWl3A5YX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "The Body is not a Given: Joint Agent Policy Learning and Morphology Evolution", "abstract": "Reinforcement learning (RL) has proven to be a powerful paradigm for deriving complex behaviors from simple reward signals in a wide range of environments. When applying RL to continuous control agents in simulated physics environments, the body is usually considered to be part of the environment. However, during evolution the physical body of biological organisms and their controlling brains are co-evolved, thus exploring a much larger space of actuator/controller configurations. 
Put differently, the intelligence does not reside only in the agent's mind, but also in the design of their body. \nWe propose a method for uncovering strong agents, consisting of a good combination of a body and policy, based on combining RL with an evolutionary procedure. Given the resulting agent, we also propose an approach for identifying the body changes that contributed the most to the agent performance. We use the Shapley value from cooperative game theory to find the fair contribution of individual components, taking into account synergies between components. \nWe evaluate our methods in an environment similar to the recently proposed Robo-Sumo task, where agents in a 3D environment with simulated physics compete in tipping over their opponent or pushing them out of the arena. Our results show that the proposed methods are indeed capable of generating strong agents, significantly outperforming baselines that focus on optimizing the agent policy alone. \n\nA video is available at: www.youtube.com/watch?v=eei6Rgom3YY", "keywords": ["Reinforcement Learning", "Continuous Control", "Evolutionary Computation", "Genetic Algorithms", "Evolving Morphology", "Baldwin Effect", "Population Based Training"], "authorids": ["dylski@google.com", "yorambac@google.com", "guylever@google.com", "heess@google.com", "pushmeet@google.com", "liusiqi@google.com", "chrisantha@google.com", "thore@google.com"], "authors": ["Dylan Banarse", "Yoram Bachrach", "Siqi Liu", "Chrisantha Fernando", "Nicolas Heess", "Pushmeet Kohli", "Guy Lever", "Thore Graepel"], "TL;DR": "Evolving the shape of the body in RL controlled agents improves their performance (and help learning)", "pdf": "/pdf/e0cb762e4788adb7e795f442d9f3b13bc2a17768.pdf", "paperhash": "banarse|the_body_is_not_a_given_joint_agent_policy_learning_and_morphology_evolution"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087912699, 
"details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJegl2C9K7", "original": "S1eiGjCvYQ", "number": 1042, "cdate": 1538087911742, "ddate": null, "tcdate": 1538087911742, "tmdate": 1750551534283, "tddate": null, "forum": "rJegl2C9K7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Feature Matters: A Stage-by-Stage Approach for Task Independent Knowledge Transfer", "abstract": "Convolutional Neural Networks (CNNs) become deeper and deeper in recent years, making the study of model acceleration imperative. It is a common practice to employ a shallow network, called student, to learn from a deep one, which is termed as teacher. Prior work made many attempts to transfer different types of knowledge from teacher to student, however, there are two problems remaining unsolved. Firstly, the knowledge used by existing methods is highly dependent on task and dataset, limiting their applications. Secondly, there lacks an effective training scheme for the transfer process, leading to degradation of performance. 
In this work, we argue that feature is the most important knowledge from teacher. It is sufficient for student to just learn good features regardless of the target task. From this discovery, we further present an efficient learning strategy to mimic features stage by stage. Extensive experiments demonstrate the importance of features and show that the proposed approach significantly narrows down the gap between student and teacher, outperforming the state-of-the-art methods.\n", "keywords": ["knowledge transfer", "task independent", "feature transfer", "stage-by-stage"], "authorids": ["daisy@tju.edu.cn", "sy116@ie.cuhk.edu.hk", "liquanquan@sensetime.com", "lwan@tju.edu.cn", "xtang@ie.cuhk.edu.hk"], "authors": ["Mengya Gao", "Yujun Shen", "Quanquan Li", "Liang Wan", "Xiaoou Tang"], "TL;DR": "This paper proposes to transfer knowledge from deep model to shallow one by mimicking features stage by stage.", "pdf": "/pdf/06729f67091f583f6caef1c72f89e598b8f957cf.pdf", "paperhash": "gao|feature_matters_a_stagebystage_approach_for_task_independent_knowledge_transfer", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/feature-matters-a-stage-by-stage-approach-for/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087911768, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", 
"values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BklAyh05YQ", "original": "B1ekjLsqFm", "number": 1031, "cdate": 1538087909768, "ddate": null, "tcdate": 1538087909768, "tmdate": 1683306260623, "tddate": null, "forum": "BklAyh05YQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Neural Network Bandit Learning by Last Layer Marginalization", "abstract": "We propose a new method for training neural networks online in a bandit setting. Similar to prior work, we model the uncertainty only in the last layer of the network, treating the rest of the network as a feature extractor. This allows us to successfully balance between exploration and exploitation due to the efficient, closed-form uncertainty estimates available for linear models. To train the rest of the network, we take advantage of the posterior we have over the last layer, optimizing over all values in the last layer distribution weighted by probability. 
We derive a closed form, differential approximation to this objective and show empirically over various models and datasets that training the rest of the network in this fashion leads to both better online and offline performance when compared to other methods.", "keywords": ["Bandit learning", "online learning", "contextual bandits", "neural network learning in online settings"], "authorids": ["nwweber@cs.stonybrook.edu", "janez.j.starc@gmail.com", "arpit.mittal@yahoo.com", "roiblan@amazon.es", "lluismv@amazon.es"], "authors": ["Noah Weber", "Janez Starc", "Arpit Mittal", "Roi Blanco", "Lluis Marquez"], "TL;DR": "This paper proposes a new method for neural network learning in online bandit settings by marginalizing over the last layer", "pdf": "/pdf/e76f30548510499f3efd68bfb420d907d15aa121.pdf", "paperhash": "weber|neural_network_bandit_learning_by_last_layer_marginalization"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087909796, "details": {"replyCount": 1, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": 
"note"}}} +{"id": "BJf9k305Fm", "original": "HkgmxpKqKQ", "number": 1009, "cdate": 1538087905864, "ddate": null, "tcdate": 1538087905864, "tmdate": 1683306260498, "tddate": null, "forum": "BJf9k305Fm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Visualizing and Discovering Behavioural Weaknesses in Deep Reinforcement Learning", "abstract": "As deep reinforcement learning is being applied to more and more tasks, there is a growing need to better understand and probe the learned agents. Visualizing and understanding the decision making process can be very valuable to comprehend and identify problems in the learned behavior. However, this topic has been relatively under-explored in the reinforcement learning community. In this work we present a method for synthesizing states of interest for a trained agent. Such states could be situations (e.g. crashing or damaging a car) in which specific actions are necessary. Further, critical states in which a very high or a very low reward can be achieved (e.g. risky states) are often interesting to understand the situational awareness of the system. To this end, we learn a generative model over the state space of the environment and use its latent space to optimize a target function for the state of interest. In our experiments we show that this method can generate insightful visualizations for a variety of environments and reinforcement learning methods. We explore these issues in the standard Atari benchmark games as well as in an autonomous driving simulator. 
Based on the efficiency with which we have been able to identify significant decision scenarios with this technique, we believe this general approach could serve as an important tool for AI safety applications.", "keywords": ["Visualization", "Deep Reinforcement Learning"], "authorids": ["christian.rupprecht@in.tum.de", "cyril.ibrahim@elementai.com", "christopher.pal@polymtl.ca"], "authors": ["Christian Rupprecht", "Cyril Ibrahim", "Chris Pal"], "TL;DR": "We present a method to synthesize states of interest for reinforcement learning agents in order to analyze their behavior. ", "pdf": "/pdf/031b1e528623d3b1cb5273e2cef20bd448bb5e26.pdf", "paperhash": "rupprecht|visualizing_and_discovering_behavioural_weaknesses_in_deep_reinforcement_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087905888, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "S1eKJ3R5KQ", "original": "r1l_6Bh9tm", "number": 1002, "cdate": 1538087904666, "ddate": null, "tcdate": 1538087904666, 
"tmdate": 1750551534399, "tddate": null, "forum": "S1eKJ3R5KQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Answer-based Adversarial Training for Generating Clarification Questions", "abstract": "We propose a generative adversarial training approach for the problem of clarification question generation. Our approach generates clarification questions with the goal of eliciting new information that would make the given context more complete. We develop a Generative Adversarial Network (GAN) where the generator is a sequence-to-sequence model and the discriminator is a utility function that models the value of updating the context with the answer to the clarification question. We evaluate on two datasets, using both automatic metrics and human judgments of usefulness, specificity and relevance, showing that our approach outperforms both a retrieval-based model and ablations that exclude the utility model and the adversarial training.\n", "keywords": ["natural language processing", "text generation", "generative adversarial network"], "authorids": ["raosudha@cs.umd.edu", "hal@umiacs.umd.edu"], "authors": ["Sudha Rao", "Hal Daum\u00e9 III"], "TL;DR": "We propose an adversarial training approach to the problem of clarification question generation which uses the answer to the question to model the reward. 
", "pdf": "/pdf/d5a432da55cdc0f50e0c6ec4e642e57ca3abda5e.pdf", "paperhash": "rao|answerbased_adversarial_training_for_generating_clarification_questions", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/answer-based-adversarial-training-for/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087904690, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "S1fNJhRqFX", "original": "S1xcUkA9FQ", "number": 979, "cdate": 1538087900277, "ddate": null, "tcdate": 1538087900277, "tmdate": 1683306260348, "tddate": null, "forum": "S1fNJhRqFX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Exploration using Distributional RL and UCB", "abstract": " We establish the relation between Distributional RL and the Upper Confidence Bound (UCB) approach to exploration.\n In this paper we show that the density of the Q function estimated by Distributional 
RL can be successfully used for the estimation of UCB. This approach does not require counting and, therefore, generalizes well to the Deep RL. We also point to the asymmetry of the empirical densities estimated by the Distributional RL algorithms like QR-DQN. This observation leads to the reexamination of the variance's performance in the UCB type approach to exploration. We introduce truncated variance as an alternative estimator of the UCB and a novel algorithm based on it. We empirically show that newly introduced algorithm achieves better performance in multi-armed bandits setting. Finally, we extend this approach to high-dimensional setting and test it on the Atari 2600 games. New approach achieves better performance compared to QR-DQN in 26 of games, 13 ties out of 49 games.", "keywords": ["Distributional RL", "UCB", "exploration", "Atari 2600", "multi-armed bandits"], "authorids": ["mavrin@ualberta.ca", "hengshuai.yao@huawei.com", "lkong@ualberta.ca", "zhangshangtong.cpp@gmail.com"], "authors": ["Borislav Mavrin", "Hengshuai Yao", "Linglong Kong", "Shangtong Zhang"], "TL;DR": "Exploration using Distributional RL and truncated variance.", "pdf": "/pdf/c848962619a0820a6a346f72a374a44d821def86.pdf", "paperhash": "mavrin|exploration_using_distributional_rl_and_ucb"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087900303, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users 
that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJxfJnC9YX", "original": "HyexZ_h9FQ", "number": 969, "cdate": 1538087898491, "ddate": null, "tcdate": 1538087898491, "tmdate": 1683306260255, "tddate": null, "forum": "BJxfJnC9YX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Learning Spatio-Temporal Representations Using Spike-Based Backpropagation", "abstract": "Spiking neural networks (SNNs) offer a promising alternative to current artificial neural networks to enable low-power event-driven neuromorphic hardware. However, training SNNs remains a challenge primarily because of the complex non-differentiable neuronal behavior arising from their spike-based computation. In this paper, we propose an algorithm to train spiking autoencoders on regenerative learning tasks. A sigmoid approximation is used in place of the Leaky Integrate-and-Fire neuron's threshold based activation during backpropagation to enable differentiability. The loss is computed on the membrane potential of the output layer, which is then backpropagated through the network at each time step. These spiking autoencoders learn meaningful spatio-temporal representations of the data, across two modalities - audio and visual. We demonstrate audio to image synthesis in a spike-based environment by sharing these spatio-temporal representations between the two modalities. These models achieve very low reconstruction loss, comparable to ANNs, on MNIST and Fashion-MNIST datasets, and while converting TI-46 digits audio samples to MNIST images. 
", "keywords": ["spiking neural networks", "autoencoders", "representation learning", "backpropagation", "multimodal"], "authorids": ["roy77@purdue.edu", "pandap@purdue.edu", "kaushik@purdue.edu"], "authors": ["Deboleena Roy", "Priyadarshini Panda", "Kaushik Roy"], "pdf": "/pdf/820d782a1eb38bc17b02b942b7d13c7bdf1d6648.pdf", "paperhash": "roy|learning_spatiotemporal_representations_using_spikebased_backpropagation"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087898514, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJgzJh0qtQ", "original": "HketzJA5Ym", "number": 965, "cdate": 1538087897812, "ddate": null, "tcdate": 1538087897812, "tmdate": 1683306260113, "tddate": null, "forum": "SJgzJh0qtQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "A SINGLE SHOT PCA-DRIVEN ANALYSIS OF NETWORK STRUCTURE TO REMOVE REDUNDANCY", "abstract": "Deep learning models have outperformed traditional methods in many fields 
such\nas natural language processing and computer vision. However, despite their\ntremendous success, the methods of designing optimal Convolutional Neural Networks\n(CNNs) are still based on heuristics or grid search. The resulting networks\nobtained using these techniques are often overparametrized with huge computational\nand memory requirements. This paper focuses on a structured, explainable\napproach towards optimal model design that maximizes accuracy while keeping\ncomputational costs tractable. We propose a single-shot analysis of a trained CNN\nthat uses Principal Component Analysis (PCA) to determine the number of filters\nthat are doing significant transformations per layer, without the need for retraining.\nIt can be interpreted as identifying the dimensionality of the hypothesis space\nunder consideration. The proposed technique also helps estimate an optimal number\nof layers by looking at the expansion of dimensions as the model gets deeper.\nThis analysis can be used to design an optimal structure of a given network on\na dataset, or help to adapt a predesigned network on a new dataset. 
We demonstrate\nthese techniques by optimizing VGG and AlexNet networks on CIFAR-10,\nCIFAR-100 and ImageNet datasets.", "keywords": ["deep learning", "model compression", "pruning", "PCA"], "authorids": ["gargi@purdue.edu", "pandap@purdue.edu", "kaushik@purdue.edu"], "authors": ["Isha Garg", "Priyadarshini Panda", "Kaushik Roy"], "TL;DR": "We present a single shot analysis of a trained neural network to remove redundancy and identify optimal network structure", "pdf": "/pdf/24bdde0b474f890711e4dc02615ca038609a17f5.pdf", "paperhash": "garg|a_single_shot_pcadriven_analysis_of_network_structure_to_remove_redundancy"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087897836, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "H1zW13R5tm", "original": "BJlWMAbdKX", "number": 962, "cdate": 1538087897306, "ddate": null, "tcdate": 1538087897306, "tmdate": 1683306260025, "tddate": null, "forum": "H1zW13R5tm", "replyto": null, "invitation": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Bamboo: Ball-Shape Data Augmentation Against Adversarial Attacks from All Directions", "abstract": "Deep neural networks (DNNs) are widely adopted in real-world cognitive applications because of their high accuracy. The robustness of DNN models, however, has been recently challenged by adversarial attacks where small disturbance on input samples may result in misclassification. State-of-the-art defending algorithms, such as adversarial training or robust optimization, improve DNNs' resilience to adversarial attacks by paying high computational costs. Moreover, these approaches are usually designed to defend one or a few known attacking techniques only. The effectiveness to defend other types of attacking methods, especially those that have not yet been discovered or explored, cannot be guaranteed. This work aims for a general approach of enhancing the robustness of DNN models under adversarial attacks. In particular, we propose Bamboo -- the first data augmentation method designed for improving the general robustness of DNN without any hypothesis on the attacking algorithms. Bamboo augments the training data set with a small amount of data uniformly sampled on a fixed radius ball around each training data and hence, effectively increase the distance between natural data points and decision boundary. 
Our experiments show that Bamboo substantially improve the general robustness against arbitrary types of attacks and noises, achieving better results comparing to previous adversarial training methods, robust optimization methods and other data augmentation methods with the same amount of data points.", "keywords": ["DNN robustness", "Adversarial attack", "Data augmentation"], "authorids": ["huanrui.yang@duke.edu", "jingchi.zhang@duke.edu", "hc218@duke.edu", "wenhanw@microsoft.com", "yiran.chen@duke.edu", "hai.li@duke.edu"], "authors": ["Huanrui Yang", "Jingchi Zhang", "Hsin-Pai Cheng", "Wenhan Wang", "Yiran Chen", "Hai Li"], "TL;DR": "The first data augmentation method specially designed for improving the general robustness of DNN without any hypothesis on the attacking algorithms.", "pdf": "/pdf/6896bc9431ad602b5f7330cd8759094e753c037b.pdf", "paperhash": "yang|bamboo_ballshape_data_augmentation_against_adversarial_attacks_from_all_directions"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087897329, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, 
"transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJl11nCctX", "original": "SygjnkmcF7", "number": 946, "cdate": 1538087894564, "ddate": null, "tcdate": 1538087894564, "tmdate": 1683306259964, "tddate": null, "forum": "SJl11nCctX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "TFGAN: Improving Conditioning for Text-to-Video Synthesis", "abstract": "Developing conditional generative models for text-to-video synthesis is an extremely challenging yet an important topic of research in machine learning. In this work, we address this problem by introducing Text-Filter conditioning Generative Adversarial Network (TFGAN), a GAN model with novel conditioning scheme that aids improving the text-video associations. With a combination of this conditioning scheme and a deep GAN architecture, TFGAN generates photo-realistic videos from text on very challenging real-world video datasets. In addition, we construct a benchmark synthetic dataset of moving shapes to systematically evaluate our conditioning scheme. 
Extensive experiments demonstrate that TFGAN significantly outperforms the existing approaches, and can also generate videos of novel categories not seen during training.\n", "keywords": ["Conditional GAN", "Video Generation", "Text-to-Video Synthesis", "Conditional Generative Models", "Deep Generative Models"], "authorids": ["yogesh@cs.umd.edu", "renqiang@nec-labs.com", "bbai@nec-labs.com", "rama@umiacs.umd.edu", "hpg@nec-labs.com"], "authors": ["Yogesh Balaji", "Martin Renqiang Min", "Bing Bai", "Rama Chellappa", "Hans Peter Graf"], "TL;DR": "An effective text-conditioning GAN framework for generating videos from text", "pdf": "/pdf/059c7c20d075a8066b344f47beab9a6724fb7cb3.pdf", "paperhash": "balaji|tfgan_improving_conditioning_for_texttovideo_synthesis"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087894588, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1GaAjRcF7", "original": "HJlzwzQqKm", "number": 940, "cdate": 1538087893486, "ddate": null, "tcdate": 
1538087893486, "tmdate": 1683306259857, "tddate": null, "forum": "r1GaAjRcF7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Differentiable Greedy Networks", "abstract": "Optimal selection of a subset of items from a given set is a hard problem that requires combinatorial optimization. In this paper, we propose a subset selection algorithm that is trainable with gradient based methods yet achieves near optimal performance via submodular optimization. We focus on the task of identifying a relevant set of sentences for claim verification in the context of the FEVER task. Conventional methods for this task look at sentences on their individual merit and thus do not optimize the informativeness of sentences as a set. We show that our proposed method which builds on the idea of unfolding a greedy algorithm into a computational graph allows both interpretability and gradient based training. The proposed differentiable greedy network (DGN) outperforms discrete optimization algorithms as well as other baseline methods in terms of precision and recall.", "keywords": ["submodular optimization", "fact verification", "differentiable module", "deep unfolding"], "authorids": ["tcpowers@uw.edu", "rasool.fakoor@mavs.uta.edu", "siamaks@amazon.com", "sethya@amazon.com", "amanjitsingh.kainth@mail.utoronto.ca", "asamir@cs.toronto.edu", "rsarikay@amazon.com"], "authors": ["Thomas Powers", "Rasool Fakoor", "Siamak Shakeri", "Abhinav Sethy", "Amanjit Kainth", "Abdel-rahman Mohamed", "Ruhi Sarikaya"], "TL;DR": "We propose a subset selection algorithm that is trainable with gradient based methods yet achieves near optimal performance via submodular optimization.", "pdf": "/pdf/5e93c38d258922486d2cdf828ab23437b41c517e.pdf", "paperhash": "powers|differentiable_greedy_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087893512, 
"details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1enCo0cK7", "original": "HylWXNj5Y7", "number": 933, "cdate": 1538087892218, "ddate": null, "tcdate": 1538087892218, "tmdate": 1683306259805, "tddate": null, "forum": "B1enCo0cK7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "One Bit Matters: Understanding Adversarial Examples as the Abuse of Redundancy", "abstract": "Adversarial examples have somewhat disrupted the enormous success of machine learning (ML) and are causing concern with regards to its trustworthiness: A small perturbation of an input results in an arbitrary failure of an otherwise seemingly well-trained ML system. While studies are being conducted to discover the intrinsic properties of adversarial examples, such as their transferability and universality, there is insufficient theoretic analysis to help understand the phenomenon in a way that can influence the design process of ML experiments. 
In this paper, we deduce an information-theoretic model which explains adversarial attacks universally as the abuse of feature redundancies in ML algorithms. We prove that feature redundancy is a necessary condition for the existence of adversarial examples. Our model helps to explain the major questions raised in many anecdotal studies on adversarial examples. Our theory is backed up by empirical measurements of the information content of benign and adversarial examples on both image and text datasets. Our measurements show that typical adversarial examples introduce just enough redundancy to overflow the decision making of a machine learner trained on corresponding benign examples. We conclude with actionable recommendations to improve the robustness of machine learners against adversarial examples.", "keywords": ["adversarial examples", "information theory", "robust neural networks"], "authorids": ["wangjksjtu_01@sjtu.edu.cn", "ruoxijia@berkeley.edu", "fractor@eecs.berkeley.edu", "lxbosky@gmail.com", "spanos@berkeley.edu"], "authors": ["Jingkang Wang", "Ruoxi Jia", "Gerald Friedland", "Bo Li", "Costas Spanos"], "TL;DR": "A new theoretical explanation for the existence of adversarial examples", "pdf": "/pdf/798ae8f944a2e7cd9542845816f87e35fbb44be9.pdf", "paperhash": "wang|one_bit_matters_understanding_adversarial_examples_as_the_abuse_of_redundancy"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087892243, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above 
content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rkgs0oAqFQ", "original": "BJeRQ_nqYX", "number": 928, "cdate": 1538087891356, "ddate": null, "tcdate": 1538087891356, "tmdate": 1750551534842, "tddate": null, "forum": "rkgs0oAqFQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Rethinking Knowledge Graph Propagation for Zero-Shot Learning", "abstract": "Graph convolutional neural networks have recently shown great potential for the task of zero-shot learning. These models are highly sample efficient as related concepts in the graph structure share statistical strength allowing generalization to new classes when faced with a lack of data. However, we find that the extensive use of Laplacian smoothing at each layer in current approaches can easily dilute the knowledge from distant nodes and consequently decrease the performance in zero-shot learning. In order to still enjoy the benefit brought by the graph structure while preventing the dilution of knowledge from distant nodes, we propose a Dense Graph Propagation (DGP) module with carefully designed direct links among distant nodes. DGP allows us to exploit the hierarchical graph structure of the knowledge graph through additional connections. These connections are added based on a node's relationship to its ancestors and descendants. A weighting scheme is further used to weigh their contribution depending on the distance to the node. 
Combined with finetuning of the representations in a two-stage training approach our method outperforms state-of-the-art zero-shot learning approaches.", "keywords": ["Dense graph propagation", "zero-shot learning"], "authorids": ["michael.c.kampffmeyer@uit.no", "cyvius96@gmail.com", "xdliang328@gmail.com", "hwang87@mit.edu", "zhangyujia2014@ia.ac.cn", "epxing@cs.cmu.edu"], "authors": ["Michael Kampffmeyer", "Yinbo Chen", "Xiaodan Liang", "Hao Wang", "Yujia Zhang", "Eric P. Xing"], "TL;DR": "We rethink the way information can be exploited more efficiently in the knowledge graph in order to improve performance on the Zero-Shot Learning task and propose a dense graph propagation (DGP) module for this purpose.", "pdf": "/pdf/e2bc39d191235b44aa49d8a22bff2ef1f2e5e214.pdf", "paperhash": "kampffmeyer|rethinking_knowledge_graph_propagation_for_zeroshot_learning", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/rethinking-knowledge-graph-propagation-for/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087891382, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": 
["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "ryxsCiAqKm", "original": "ryepiKs5YQ", "number": 927, "cdate": 1538087891179, "ddate": null, "tcdate": 1538087891179, "tmdate": 1683306259623, "tddate": null, "forum": "ryxsCiAqKm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Spectral Convolutional Networks on Hierarchical Multigraphs", "abstract": "Spectral Graph Convolutional Networks (GCNs) are a generalization of convolutional networks to learning on graph-structured data. Applications of spectral GCNs have been successful, but limited to a few problems where the graph is fixed, such as shape correspondence and node classification. In this work, we address this limitation by revisiting a particular family of spectral graph networks, Chebyshev GCNs, showing its efficacy in solving graph classification tasks with a variable graph structure and size. Current GCNs also restrict graphs to have at most one edge between any pair of nodes. To this end, we propose a novel multigraph network that learns from multi-relational graphs. We explicitly model different types of edges: annotated edges, learned edges with abstract meaning, and hierarchical edges. We also experiment with different ways to fuse the representations extracted from different edge types. This restriction is sometimes implied from a dataset, however, we relax this restriction for all kinds of datasets. We achieve state-of-the-art results on a variety of chemical, social, and vision graph classification benchmarks.", "keywords": ["graph convolution", "hierarchical models", "neural networks", "multigraph", "deep learning"], "authorids": ["bknyazev@uoguelph.ca", "xiao.lin@sri.com", "mohamed.amer@sri.com", "gwtaylor@uoguelph.ca"], "authors": ["Boris Knyazev", "Xiao Lin", "Mohamed R. Amer", "Graham W. 
Taylor"], "TL;DR": "A novel approach to graph classification based on spectral graph convolutional networks and its extension to multigraphs with learnable relations and hierarchical structure. We show state-of-the art results on chemical, social and image datasets.", "pdf": "/pdf/2780766fb8a334a6488b6abc29ffd6f8348d30a8.pdf", "paperhash": "knyazev|spectral_convolutional_networks_on_hierarchical_multigraphs"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087891203, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rkl5CjC9Fm", "original": "B1l5SqacYX", "number": 920, "cdate": 1538087889978, "ddate": null, "tcdate": 1538087889978, "tmdate": 1683306259483, "tddate": null, "forum": "rkl5CjC9Fm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Dual Importance Weight GAN", "abstract": "Generative Adversarial Networks (GAN) are trained to generate a sample image of interest. 
To this end, generative network of GAN learns implicit distribution of true dataset from the classification samples with candidate generated samples. However, in real implementation of GAN, training the generative network with limited number of candidate samples guarantees to properly represent neither true distribution nor the distribution of generator outputs. In this paper, we propose dual importance weights for the candidate samples represented in the latent space of auto-encoder. The auto-encoder is pre-trained with real target dataset. Therefore, the latent space representation allows us to compare real distribution and the distribution of generated samples explicitly. Dual importance weights iteratively maximize the representation of generated samples for both distributions: current generator outputs and real dataset. Proposed generative model not only resolves mode collapse problem of GAN but also improves the convergence on target distribution. Experimental evaluation shows that the proposed network learns complete modes of target distribution more stable and faster than state of the art methods. 
", "keywords": [], "authorids": ["waldstein94@gmail.com", "seungkyu@khu.ac.kr"], "authors": ["Gahye Lee", "Seungkyu Lee"], "pdf": "/pdf/b412aa2d4ec85c8f7f738a2c0a96571ac67e0aec.pdf", "paperhash": "lee|dual_importance_weight_gan"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087890002, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJlr0j0ctX", "original": "Sklbqh35FX", "number": 892, "cdate": 1538087885129, "ddate": null, "tcdate": 1538087885129, "tmdate": 1750551534902, "tddate": null, "forum": "BJlr0j0ctX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Label Smoothing and Logit Squeezing: A Replacement for Adversarial Training?", "abstract": "Adversarial training is one of the strongest defenses against adversarial attacks, but it requires adversarial examples to be generated for every mini-batch during optimization. 
The expense of producing these examples during training often precludes adversarial training from use on complex image datasets. \nIn this study, we explore the mechanisms by which adversarial training improves classifier robustness, and show that these mechanisms can be effectively mimicked using simple regularization methods, including label smoothing and logit squeezing. \nRemarkably, using these simple regularization methods in combination with Gaussian noise injection, we are able to achieve strong adversarial robustness -- often exceeding that of adversarial training -- using no adversarial examples.", "keywords": ["adversarial machine learning", "machine learning security"], "authorids": ["ashafahi@cs.umd.edu", "amin@cs.umd.edu", "furongh@cs.umd.edu", "tomg@cs.umd.edu"], "authors": ["Ali Shafahi", "Amin Ghiasi", "Furong Huang", "Tom Goldstein"], "TL;DR": "Achieving strong adversarial robustness comparable to adversarial training without training on adversarial examples", "pdf": "/pdf/d75dc829e6eda0f9435238bde00479273efd8985.pdf", "paperhash": "shafahi|label_smoothing_and_logit_squeezing_a_replacement_for_adversarial_training", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/label-smoothing-and-logit-squeezing-a/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087885154, "details": {"replyCount": 26, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, 
"writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1gVRi0qFQ", "original": "BkxO02acYm", "number": 883, "cdate": 1538087883589, "ddate": null, "tcdate": 1538087883589, "tmdate": 1750551534948, "tddate": null, "forum": "B1gVRi0qFQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Online abstraction with MDP homomorphisms for Deep Learning", "abstract": "Abstraction of Markov Decision Processes is a useful tool for solving complex problems, as it can ignore unimportant aspects of an environment, simplifying the process of learning an optimal policy. In this paper, we propose a new algorithm for finding abstract MDPs in environments with continuous state spaces. It is based on MDP homomorphisms, a structure-preserving mapping between MDPs. We demonstrate our algorithm's ability to learns abstractions from collected experience and show how to reuse the abstractions to guide exploration in new tasks the agent encounters. 
Our novel task transfer method beats a baseline based on a deep Q-network.", "keywords": ["reinforcement learning", "abstraction", "mdp homomorphism", "deep learning", "robotics"], "authorids": ["bizaondr@fit.cvut.cz", "rplatt@ccs.neu.edu"], "authors": ["Ondrej Biza", "Robert Platt"], "TL;DR": "We create abstract models of environments from experience and use them to learn new tasks faster.", "pdf": "/pdf/03a3376e005459b1cb8bbdcd49c493c72b0acba7.pdf", "paperhash": "biza|online_abstraction_with_mdp_homomorphisms_for_deep_learning", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/online-abstraction-with-mdp-homomorphisms-for/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087883618, "details": {"replyCount": 3, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SkfQAiA9YX", "original": "H1l2DgOcFm", "number": 879, "cdate": 1538087882884, "ddate": null, "tcdate": 1538087882884, "tmdate": 1683306259224, "tddate": 
null, "forum": "SkfQAiA9YX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "In search of theoretically grounded pruning", "abstract": "Deep learning relies on resource-heavy linear algebra operations which can be prohibitively expensive when deploying to constrained embedded and mobile devices, or even when training large-scale networks. One way to reduce a neural network's resource requirements is to sparsify its weight matrices - a process often referred to as pruning. It is typically achieved by removing least important weights as measured by some salience criterion, with pruning by magnitude being the most popular option. This, however, often makes close to random judgments. In this paper we aim to closely investigate the concept of model weight importance, with a particular focus on the magnitude criterion and its most suitable substitute. To this end we identify a suitable Statistical framework and derive deep model parameter asymptotic theory to use with it. Thus, we derive a statistically-grounded pruning criterion which we compare with the magnitude pruning both qualitatively and quantitatively. We find this criterion to better capture parameter salience, by accounting for its estimation uncertainty. This results in improved performance and easier post-pruned re-training.", "keywords": [], "authorids": ["filip.svoboda@stx.ox.ac.uk", "edgar.liberis@chch.ox.ac.uk", "nicholas.lane@cs.ox.ac.uk"], "authors": ["Filip Svoboda", "Edgar Liberis", "Nicholas D. 
Lane"], "pdf": "/pdf/2dc2a07619bd7dbae1ec080e6e0e1f648afbafcd.pdf", "paperhash": "svoboda|in_search_of_theoretically_grounded_pruning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087882915, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "H1GWAoRcKX", "original": "SylGY4p5FX", "number": 868, "cdate": 1538087880910, "ddate": null, "tcdate": 1538087880910, "tmdate": 1683306258981, "tddate": null, "forum": "H1GWAoRcKX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "A Teacher Student Network For Faster Video Classification", "abstract": "Over the past few years, various tasks involving videos such as classification, description, summarization and question answering have received a lot of attention. Current models for these tasks compute an encoding of the video by treating it as a sequence of images and going over every image in the sequence, which becomes computationally expensive for longer videos. 
In this paper, we focus on the task of video classification and aim to reduce the computational cost by using the idea of distillation. Specifically, we propose a Teacher-Student network wherein the teacher looks at all the frames in the video but the student looks at only a small fraction of the frames in the video. The idea is to then train the student to minimize (i) the difference between the final representation computed by the student and the teacher and/or (ii) the difference between the distributions predicted by the teacher and the student. This smaller student network which involves fewer computations but still learns to mimic the teacher can then be employed at inference time for video classification. We experiment with the YouTube-8M dataset and show that the proposed student network can reduce the inference time by upto 30% with a negligent drop in the performance. ", "keywords": ["video classification", "efficient computation", "knowledge distillation", "teacher-student"], "authorids": ["cs16s003@cse.iitm.ac.in", "msrinivasan@nvidia.com", "miteshk@cse.iitm.ac.in"], "authors": ["Shweta Bhardwaj", "Mukundhan Srinivasan", "Mitesh M. 
Khapra"], "TL;DR": "Teacher-Student framework for efficient video classification using fewer frames ", "pdf": "/pdf/b2e92652dd0a0aac936aa2b618b6961e2a97032f.pdf", "paperhash": "bhardwaj|a_teacher_student_network_for_faster_video_classification"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087880936, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1eZCjA9KX", "original": "HJxSr_FctX", "number": 866, "cdate": 1538087880546, "ddate": null, "tcdate": 1538087880546, "tmdate": 1683306258825, "tddate": null, "forum": "B1eZCjA9KX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "IncSQL: Training Incremental Text-to-SQL Parsers with Non-Deterministic Oracles", "abstract": "We present a sequence-to-action parsing approach for the natural language to SQL task that incrementally fills the slots of a SQL query with feasible actions from a pre-defined inventory. 
To account for the fact that typically there are multiple correct SQL queries with the same or very similar semantics, we draw inspiration from syntactic parsing techniques and propose to train our sequence-to-action models with non-deterministic oracles. We evaluate our models on the WikiSQL dataset and achieve an execution accuracy of 83.7% on the test set, a 2.1% absolute improvement over the models trained with traditional static oracles assuming a single correct target SQL query. When further combined with the execution-guided decoding strategy, our model sets a new state-of-the-art performance at an execution accuracy of 87.1%.", "keywords": ["semantic parsing", "non-deterministic oracles", "natural language to SQL", "incremental parsing", "sequence prediction"], "authorids": ["tianze@cs.cornell.edu", "kedart@stanford.edu", "kaushik@microsoft.com", "maoyi@microsoft.com", "polozov@microsoft.com", "wzchen@microsoft.com"], "authors": ["Tianze Shi", "Kedar Tatwawadi", "Kaushik Chakrabarti", "Yi Mao", "Oleksandr Polozov", "Weizhu Chen"], "TL;DR": "We design incremental sequence-to-action parsers for text-to-SQL task and achieve SOTA results. We further improve by using non-deterministic oracles to allow multiple correct action sequences. 
", "pdf": "/pdf/2ad29c4a27386931a85355b9abce5de773accb04.pdf", "paperhash": "shi|incsql_training_incremental_texttosql_parsers_with_nondeterministic_oracles"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087880569, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "Bylj6oC5K7", "original": "Ske5kV4cKX", "number": 833, "cdate": 1538087874860, "ddate": null, "tcdate": 1538087874860, "tmdate": 1683306258678, "tddate": null, "forum": "Bylj6oC5K7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Logit Regularization Methods for Adversarial Robustness", "abstract": "While great progress has been made at making neural networks effective across a wide range of tasks, many are surprisingly vulnerable to small, carefully chosen perturbations of their input, known as adversarial examples. 
In this paper, we advocate for and experimentally investigate the use of logit regularization techniques as an adversarial defense, which can be used in conjunction with other methods for creating adversarial robustness at little to no cost. We demonstrate that much of the effectiveness of one recent adversarial defense mechanism can be attributed to logit regularization and show how to improve its defense against both white-box and black-box attacks, in the process creating a stronger black-box attacks against PGD-based models.\n", "keywords": ["adversarial"], "authorids": ["ceciliasummers07@gmail.com", "mjd@cs.auckland.ac.nz"], "authors": ["Cecilia Summers", "Michael J. Dinneen"], "TL;DR": "Logit regularization methods help explain and improve state of the art adversarial defenses", "pdf": "/pdf/e9c2bf2cd826b46016825ad0131402099c694fd5.pdf", "paperhash": "summers|logit_regularization_methods_for_adversarial_robustness"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087874885, "details": {"replyCount": 19, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 
1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJxKajC5t7", "original": "B1eJAoXtFQ", "number": 823, "cdate": 1538087873111, "ddate": null, "tcdate": 1538087873111, "tmdate": 1683306258528, "tddate": null, "forum": "HJxKajC5t7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Self-Binarizing Networks", "abstract": "We present a method to train self-binarizing neural networks, that is, networks that evolve their weights and activations during training to become binary. To obtain similar binary networks, existing methods rely on the sign activation function. This function, however, has no gradients for non-zero values, which makes standard backpropagation impossible. To circumvent the difficulty of training a network relying on the sign activation function, these methods alternate between floating-point and binary representations of the network during training, which is sub-optimal and inefficient. We approach the binarization task by training on a unique representation involving a smooth activation function, which is iteratively sharpened during training until it becomes a binary representation equivalent to the sign activation function. Additionally, we introduce a new technique to perform binary batch normalization that simplifies the conventional batch normalization by transforming it into a simple comparison operation. This is unlike existing methods, which are forced to the retain the conventional floating-point-based batch normalization. 
Our binary networks, apart from displaying advantages of lower memory and computation as compared to conventional floating-point and binary networks, also show higher classification accuracy than existing state-of-the-art methods on multiple benchmark datasets.", "keywords": ["Binarization", "Convolutional Neural Networks", "Deep Learning", "Deep Neural Networks"], "authorids": ["fayez.lahoud@epfl.ch", "radhakrishna.achanta@epfl.ch", "pablo.marquez@artorg.unibe.ch", "sabine.susstrunk@epfl.ch"], "authors": ["Fayez Lahoud", "Radhakrishna Achanta", "Pablo M\u00e1rquez-Neila", "Sabine S\u00fcsstrunk"], "TL;DR": "A method to binarize both weights and activations of a deep neural network that is efficient in computation and memory usage and performs better than the state-of-the-art.", "pdf": "/pdf/826ba0af1a9860a3a83f7d562f9dbc0d4da8ab57.pdf", "paperhash": "lahoud|selfbinarizing_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087873135, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, 
"type": "note"}}} +{"id": "rkxdpiA5Ym", "original": "Hkg7cQa9FX", "number": 816, "cdate": 1538087871902, "ddate": null, "tcdate": 1538087871902, "tmdate": 1683306258485, "tddate": null, "forum": "rkxdpiA5Ym", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Diagnosing Language Inconsistency in Cross-Lingual Word Embeddings", "abstract": "Cross-lingual embeddings encode meaning of words from different languages into a shared low-dimensional space. However, despite numerous applications, evaluation of such embeddings is limited. We focus on diagnosing the problem of words segregated by languages in cross-lingual embeddings. In an ideal cross-lingual embedding, word similarity should be independent of language---i.e., words within a language should not be more similar to each other than to words in another language. One test of this is modularity, a network measurement that measures the strength of clusters in a graph. When we apply this measure to a nearest neighbor graph, imperfect cross-lingual embeddings are sorted into modular, distinct regions. The correlation of this measurement with accuracy on two downstream tasks demonstrates that modularity can serve as an intrinsic metric of embedding quality.", "keywords": ["cross-lingual embeddings", "evaluation", "graph-based metric", "modularity"], "authorids": ["yoshinari.fujinuma@colorado.edu", "jbg@umiacs.umd.edu", "michael.j.paul@colorado.edu"], "authors": ["Yoshinari Fujinuma", "Jordan Boyd-Graber", "Michael J. 
Paul"], "pdf": "/pdf/8533d615482f0001f80701b5ace49b74f658f60d.pdf", "paperhash": "fujinuma|diagnosing_language_inconsistency_in_crosslingual_word_embeddings"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087871928, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJxbps09K7", "original": "H1xS1qrqY7", "number": 777, "cdate": 1538087865139, "ddate": null, "tcdate": 1538087865139, "tmdate": 1683306258380, "tddate": null, "forum": "SJxbps09K7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Empirical observations on the instability of aligning word vector spaces with GANs", "abstract": "Unsupervised bilingual dictionary induction (UBDI) is useful for unsupervised machine translation and for cross-lingual transfer of models into low-resource languages. 
One approach to UBDI is to align word vector spaces in different languages using Generative adversarial networks (GANs) with linear generators, achieving state-of-the-art performance for several language pairs. For some pairs, however, GAN-based induction is unstable or completely fails to align the vector spaces. We focus on cases where linear transformations provably exist, but the performance of GAN-based UBDI depends heavily on the model initialization. We show that the instability depends on the shape and density of the vector sets, but not on noise; it is the result of local optima, but neither over-parameterization nor changing the batch size or the learning rate consistently reduces instability. Nevertheless, we can stabilize GAN-based UBDI through best-of-N model selection, based on an unsupervised stopping criterion. ", "keywords": ["natural language processing", "bilingual dictionary induction", "unsupervised learning", "generative adversarial networks"], "authorids": ["hartmann@di.ku.dk", "yova@di.ku.dk", "soegaard@di.ku.dk"], "authors": ["Mareike Hartmann", "Yova Kementchedjhieva", "Anders S\u00f8gaard"], "TL;DR": "An empirical investigation of GAN-based alignment of word vector spaces, focusing on cases, where linear transformations provably exist, but training is unstable.", "pdf": "/pdf/c95badf04bce091af50e1d449fad519fea5ee36b.pdf", "paperhash": "hartmann|empirical_observations_on_the_instability_of_aligning_word_vector_spaces_with_gans"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087865165, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", 
"values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "H1eZ6sRcFm", "original": "BklVkvgGYQ", "number": 775, "cdate": 1538087864739, "ddate": null, "tcdate": 1538087864739, "tmdate": 1683306258220, "tddate": null, "forum": "H1eZ6sRcFm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Variational Autoencoders for Text Modeling without Weakening the Decoder", "abstract": "Previous work (Bowman et al., 2015; Yang et al., 2017) has found difficulty developing generative models based on variational autoencoders (VAEs) for text. To address the problem of the decoder ignoring information from the encoder (posterior collapse), these previous models weaken the capacity of the decoder to force the model to use information from latent variables. However, this strategy is not ideal as it degrades the quality of generated text and increases hyper-parameters. In this paper, we propose a new VAE for text utilizing a multimodal prior distribution, a modified encoder, and multi-task learning. We show our model can generate well-conditioned sentences without weakening the capacity of the decoder. 
Also, the multimodal prior distribution improves the interpretability of acquired representations.", "keywords": ["variational autoencoders", "generative model", "deep neural network", "text modeling", "unsupervised learning", "multimodal"], "authorids": ["ryo_kamoi_st@keio.jp", "hiroyasu.fukutomi@datasection.co.jp"], "authors": ["Ryo Kamoi", "Hiroyasu Fukutomi"], "TL;DR": "We propose a model of variational autoencoders for text modeling without weakening the decoder, which improves the quality of text generation and interpretability of acquired representations.", "pdf": "/pdf/ccdf57c5e7510564837e4345dbf4dc805eb446fd.pdf", "paperhash": "kamoi|variational_autoencoders_for_text_modeling_without_weakening_the_decoder"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087864764, "details": {"replyCount": 11, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1g-TiC9FX", "original": "H1xiLwa5t7", "number": 774, "cdate": 1538087864579, "ddate": null, "tcdate": 1538087864579, "tmdate": 1683306258163, 
"tddate": null, "forum": "r1g-TiC9FX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Neural Collobrative Networks", "abstract": "This paper presents a conceptually general and modularized neural collaborative network (NCN), which overcomes the limitations of the traditional convolutional neural networks (CNNs) in several aspects. Firstly, our NCN can directly handle non-Euclidean data without any pre-processing (e.g., graph normalizations) by defining a simple yet basic unit named neuron array for feature representation. Secondly, our NCN is capable of achieving both rotational equivariance and invariance properties via a simple yet powerful neuron collaboration mechanism, which imposes a ``glocal'' operation to capture both global and local information among neuron arrays within each layer. Thirdly, compared to the state-of-the-art networks that using large CNN kernels, our NCN with considerably fewer parameters can also achieve their strengths in feature learning by only exploiting highly efficient 1x1 convolution operations. Extensive experimental analyses on learning feature representation, handling novel viewpoints, and handling non-euclidean data demonstrate that our NCN can not only achieve state-of-the-art performance but also overcome the limitation of the conventional CNNs. 
The source codes will be released to facilite future researches after the review period for ensuring the anonymity.", "keywords": ["deep learning", "neural architecture search", "collaboration representation learning"], "authorids": ["wanggrun@mail2.sysu.edu.cn", "kezewang@gmail.com", "linliang@ieee.org"], "authors": ["Guangrun Wang", "Keze Wang", "Liang Lin"], "pdf": "/pdf/94a849acbbf154d794f5a2bbe73a3934288f743e.pdf", "paperhash": "wang|neural_collobrative_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087864601, "details": {"replyCount": 1, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rygypo0qtm", "original": "Bke46Ip5tX", "number": 764, "cdate": 1538087862865, "ddate": null, "tcdate": 1538087862865, "tmdate": 1683306258060, "tddate": null, "forum": "rygypo0qtm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Show, Attend and Translate: Unsupervised Image Translation with Self-Regularization and Attention", "abstract": 
"Image translation between two domains is a class of problems aiming to learn mapping from an input image in the source domain to an output image in the target domain. It has been applied to numerous applications, such as data augmentation, domain adaptation, and unsupervised training. When paired training data is not accessible, image translation becomes an ill-posed problem. We constrain the problem with the assumption that the translated image needs to be perceptually similar to the original image and also appears to be drawn from the new domain, and propose a simple yet effective image translation model consisting of a single generator trained with a self-regularization term and an adversarial term. We further notice that existing image translation techniques are agnostic to the subjects of interest and often introduce unwanted changes or artifacts to the input. Thus we propose to add an attention module to predict an attention map to guide the image translation process. The module learns to attend to key parts of the image while keeping everything else unaltered, essentially avoiding undesired artifacts or changes. The predicted attention map also opens door to applications such as unsupervised segmentation and saliency detection. 
Extensive experiments and evaluations show that our model while being simpler, achieves significantly better performance than existing image translation methods.", "keywords": ["image translation", "domain adaptation", "saliency detection"], "authorids": ["harryyang.hk@gmail.com"], "authors": ["Chao Yang"], "TL;DR": "We propose a simple generative model for unsupervised image translation and saliency detection.", "pdf": "/pdf/8315e848c1166780ef1573433719648fb55b113e.pdf", "paperhash": "yang|show_attend_and_translate_unsupervised_image_translation_with_selfregularization_and_attention"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087862889, "details": {"replyCount": 1, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BylahsR9tX", "original": "rJx93M2qKQ", "number": 753, "cdate": 1538087860973, "ddate": null, "tcdate": 1538087860973, "tmdate": 1683306257919, "tddate": null, "forum": "BylahsR9tX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": 
{"title": "Low-Rank Matrix Factorization of LSTM as Effective Model Compression", "abstract": "Large-scale Long Short-Term Memory (LSTM) cells are often the building blocks of many state-of-the-art algorithms for tasks in Natural Language Processing (NLP). However, LSTMs are known to be computationally inefficient because the memory capacity of the models depends on the number of parameters, and the inherent recurrence that models the temporal dependency is not parallelizable. In this paper, we propose simple, but effective, low-rank matrix factorization (MF) algorithms to compress network parameters and significantly speed up LSTMs with almost no loss of performance (and sometimes even gain). To show the effectiveness of our method across different tasks, we examine two settings: 1) compressing core LSTM layers in Language Models, 2) compressing biLSTM layers of ELMo~\\citep{ELMo} and evaluate in three downstream NLP tasks (Sentiment Analysis, Textual Entailment, and Question Answering). The latter is particularly interesting as embeddings from large pre-trained biLSTM Language Models are often used as contextual word representations. Finally, we discover that matrix factorization performs better in general, additive recurrence is often more important than multiplicative recurrence, and we identify an interesting correlation between matrix norms and compression performance.\n\n", "keywords": ["NLP", "LSTM", "Compression", "Low Rank", "Norm Analysis"], "authorids": ["giwinata@connect.ust.hk", "amadotto@connect.ust.hk", "jay.shin@connect.ust.hk", "ejs@connect.ust.hk"], "authors": ["Genta Indra Winata", "Andrea Madotto", "Jamin Shin", "Elham J. 
Barezi"], "TL;DR": "We propose simple, but effective, low-rank matrix factorization (MF) algorithms to speed up in running time, save memory, and improve the performance of LSTMs.", "pdf": "/pdf/48f989c43de9234ec6ff3e81bd6d5a8c97f034a8.pdf", "paperhash": "winata|lowrank_matrix_factorization_of_lstm_as_effective_model_compression"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087860999, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "Skxqni09KX", "original": "rylyIBTcYQ", "number": 736, "cdate": 1538087858005, "ddate": null, "tcdate": 1538087858005, "tmdate": 1683306257806, "tddate": null, "forum": "Skxqni09KX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Online Bellman Residue Minimization via Saddle Point Optimization", "abstract": "We study the problem of Bellman residual minimization with nonlinear function approximation in general. 
\n Based on a nonconvex saddle point formulation of Bellman residual minimization via Fenchel duality, we propose an online first-order algorithm with two-timescale learning rates. Using tools from stochastic approximation, we establish the convergence of our problem by approximating the dynamics of the iterates using two ordinary differential equations. Moreover, as a byproduct, we establish a finite-time convergence result under the assumption that the dual problem can be solved up to some error. Finally, numerical experiments are provided to back up our theory.", "keywords": [], "authorids": ["zy6@princeton.edu", "mikechzhou@tencent.com", "tongzhang@tongzhang-ml.org", "hanliu.cmu@gmail.com"], "authors": ["Zhuoran Yang", "Cheng Zhou", "Tong Zhang", "Han Liu"], "pdf": "/pdf/b11e23478a490f3d19b723cedde7cc270e256f4f.pdf", "paperhash": "yang|online_bellman_residue_minimization_via_saddle_point_optimization"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087858033, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, 
"tcdate": 1539179226045, "type": "note"}}} +{"id": "rklvnjRqY7", "original": "B1gjY429FQ", "number": 718, "cdate": 1538087854869, "ddate": null, "tcdate": 1538087854869, "tmdate": 1683306257767, "tddate": null, "forum": "rklvnjRqY7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "A PRIVACY-PRESERVING IMAGE CLASSIFICATION FRAMEWORK WITH A LEARNABLE OBFUSCATOR", "abstract": "Real world images often contain large amounts of private / sensitive information that should be carefully protected without reducing their utilities. In this paper, we propose a privacy-preserving deep learning framework with a learnable ob- fuscator for the image classification task. Our framework consists of three mod- els: learnable obfuscator, classifier and reconstructor. The learnable obfuscator is used to remove the sensitive information in the images and extract the feature maps from them. The reconstructor plays the role as an attacker, which tries to recover the image from the feature maps extracted by the obfuscator. In order to best protect users\u2019 privacy in images, we design an adversarial training methodol- ogy for our framework to optimize the obfuscator. 
Through extensive evaluations on real world datasets, both the numerical metrics and the visualization results demonstrate that our framework is qualified to protect users\u2019 privacy and achieve a relatively high accuracy on the image classification task.", "keywords": ["privacy-preserving", "image classification", "adversarial training", "learnable obfuscator"], "authorids": ["xy.meng@my.cityu.edu.hk", "zixuhuang3-c@my.cityu.edu.hk", "yf.du@my.cityu.edu.hk", "abchan@cityu.edu.hk", "congwang@cityu.edu.hk"], "authors": ["Xiangyi Meng", "Zixuan Huang", "Yuefeng Du", "Antoni Chan", "Cong Wang"], "TL;DR": "We proposed a novel deep learning image classification framework that can both accurately classify images and protect users' privacy.", "pdf": "/pdf/752815624ec5f12edeeb071f79df32b66b302874.pdf", "paperhash": "meng|a_privacypreserving_image_classification_framework_with_a_learnable_obfuscator"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087854895, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 
1539179226045, "type": "note"}}} +{"id": "BJe8niAqKX", "original": "ByeM5UDcFX", "number": 712, "cdate": 1538087853814, "ddate": null, "tcdate": 1538087853814, "tmdate": 1683306257696, "tddate": null, "forum": "BJe8niAqKX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Learning Grounded Sentence Representations by Jointly Using Video and Text Information", "abstract": "Visual grounding of language is an active research field aiming at enriching text-based representations with visual information. In this paper, we propose a new way to leverage visual knowledge for sentence representations. Our approach transfers the structure of a visual representation space to the textual space by using two complementary sources of information: (1) the cluster information: the implicit knowledge that two sentences associated with the same visual content describe the same underlying reality and (2) the perceptual information contained within the structure of the visual space. We use a joint approach to encourage beneficial interactions during training between textual, perceptual, and cluster information. 
We demonstrate the quality of the learned representations on semantic relatedness, classification, and cross-modal retrieval tasks.", "keywords": ["multimodal", "sentence", "representation", "embedding", "grounding"], "authorids": ["patrick.bordes@lip6.fr", "eloi.zablocki@gmail.com", "laure.soulier@lip6.fr", "benjamin.piwowarski@lip6.fr", "patrick.gallinari@lip6.fr"], "authors": ["Patrick Bordes", "Eloi Zablocki", "Laure Soulier", "Benjamin Piwowarski", "Patrick Gallinari"], "TL;DR": "We propose a joint model to incorporate visual knowledge in sentence representations", "pdf": "/pdf/53981a1ccee080ed45eb87db1054eb2586a73aef.pdf", "paperhash": "bordes|learning_grounded_sentence_representations_by_jointly_using_video_and_text_information"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087853838, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HyxH2o05FQ", "original": "BJlEqG99YQ", "number": 708, "cdate": 1538087853114, "ddate": null, "tcdate": 1538087853114, "tmdate": 
1683306257624, "tddate": null, "forum": "HyxH2o05FQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Domain Adaptive Transfer Learning", "abstract": "Transfer learning is a widely used method to build high performing computer vision models. In this paper, we study the efficacy of transfer learning by examining how the choice of data impacts performance. We find that more pre-training data does not always help, and transfer performance depends on a judicious choice of pre-training data. These findings are important given the continued increase in dataset sizes. We further propose domain adaptive transfer learning, a simple and effective pre-training method using importance weights computed based on the target dataset. Our methods achieve state-of-the-art results on multiple fine-grained classification datasets and are well-suited for use in practice.", "keywords": [], "authorids": ["jngiam@google.com", "daiyip@google.com", "vrv@google.com", "skornblith@google.com", "qvl@google.com", "rpang@google.com"], "authors": ["Jiquan Ngiam", "Daiyi Peng", "Vijay Vasudevan", "Simon Kornblith", "Quoc Le", "Ruoming Pang"], "pdf": "/pdf/c99d57f55de88a6c1e3a3f6525526550c15e61e1.pdf", "paperhash": "ngiam|domain_adaptive_transfer_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087853140, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": 
"Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1e-nj05FQ", "original": "rJgjMk69Fm", "number": 683, "cdate": 1538087848636, "ddate": null, "tcdate": 1538087848636, "tmdate": 1683306257491, "tddate": null, "forum": "r1e-nj05FQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Evolving intrinsic motivations for altruistic behavior", "abstract": "Multi-agent cooperation is an important feature of the natural world. Many tasks involve individual incentives that are misaligned with the common good, yet a wide range of organisms from bacteria to insects and humans are able to overcome their differences and collaborate. Therefore, the emergence of cooperative behavior amongst self-interested individuals is an important question for the fields of multi-agent reinforcement learning (MARL) and evolutionary theory. Here, we study a particular class of multi-agent problems called intertemporal social dilemmas (ISDs), where the conflict between the individual and the group is particularly sharp. By combining MARL with appropriately structured natural selection, we demonstrate that individual inductive biases for cooperation can be learned in a model-free way. To achieve this, we introduce an innovative modular architecture for deep reinforcement learning agents which supports multi-level selection. 
We present results in two challenging environments, and interpret these in the context of cultural and ecological evolution.", "keywords": ["evolution", "reinforcement learning", "intrinsic reward", "multi-agent", "social dilemmas", "cooperation"], "authorids": ["wangjane@google.com", "edwardhughes@google.com", "chrisantha@google.com", "lejlot@google.com", "duenez@google.com", "jzl@google.com"], "authors": ["Jane X. Wang", "Edward Hughes", "Chrisantha Fernando", "Wojciech M. Czarnecki", "Edgar A. Duenez-Guzman", "Joel Z. Leibo"], "TL;DR": "We introduce a biologically-inspired modular evolutionary algorithm in which deep RL agents learn to cooperate in a difficult multi-agent social game, which could help to explain the evolution of altruism.", "pdf": "/pdf/4218358716b33f65f74f2082c53762c81038e96f.pdf", "paperhash": "wang|evolving_intrinsic_motivations_for_altruistic_behavior"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087848662, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": 
"note"}}} +{"id": "Syfe2iR5FQ", "original": "BJloOD29Km", "number": 679, "cdate": 1538087847929, "ddate": null, "tcdate": 1538087847929, "tmdate": 1683306257270, "tddate": null, "forum": "Syfe2iR5FQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Parametrizing Fully Convolutional Nets with a Single High-Order Tensor", "abstract": "Recent findings indicate that over-parametrization, while crucial to the success of deep learning, also introduces large amounts of redundancy. Tensor methods have the potential to parametrize over-complete representations in a compact manner by leveraging this redundancy. In this paper, we propose fully parametrizing Convolutional Neural Networks (CNNs) with a single, low-rank tensor. Previous works on network tensorization haved focused on parametrizing individual layers (convolutional or fully connected) only, and perform the tensorization layer-by-layer disjointly. In contrast, we propose to jointly capture the full structure of a CNN by parametrizing it with a single, high-order tensor, the modes of which represent each of the architectural design parameters of the CNN (e.g. number of convolutional blocks, depth, number of stacks, input features, etc). This parametrization allows to regularize the whole network and drastically reduce the number of parameters by imposing a low-rank structure on that tensor. Further, our network is end-to-end trainable from scratch, which has been shown to be challenging in prior work. We study the case of networks with rich structure, namely Fully Convolutional CNNs, which we propose to parametrize them with a single 8-dimensional tensor. 
We show that our approach can achieve superior performance with small compression rates, and attain high compression rates with negligible drop in accuracy for the challenging task of human pose estimation.\n", "keywords": [], "authorids": ["jean.kossaifi@gmail.com", "bulat.adrian@gmail.com", "yorgos.tzimiropoulos@nottingham.ac.uk", "maja.pantic@gmail.com"], "authors": ["Jean Kossaifi", "Adrian Bulat", "Georgios Tzimiropoulos", "Maja Pantic"], "pdf": "/pdf/44d45e772faed1f8bba36227a172d1265fc702dc.pdf", "paperhash": "kossaifi|parametrizing_fully_convolutional_nets_with_a_single_highorder_tensor"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087847954, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SyfdsjA9FX", "original": "S1lxkb8qFQ", "number": 637, "cdate": 1538087840280, "ddate": null, "tcdate": 1538087840280, "tmdate": 1750551535556, "tddate": null, "forum": "SyfdsjA9FX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", 
"content": {"title": "Live Face De-Identification in Video", "abstract": "We propose a method for face de-identification that enables fully automatic video modification at high frame rates. The goal is to maximally decorrelate the identity, while having the perception (pose, illumination and expression) fixed. We achieve this by a novel feed forward encoder-decoder network architecture that is conditioned on the high-level representation of a person's facial image. The network is global, in the sense that it does not need to be retrained for a given video or for a given identity, and it creates natural-looking image sequences with little distortion in time. ", "keywords": [], "authorids": ["oran@fb.com", "wolf@fb.com", "yaniv@fb.com"], "authors": ["Oran Gafni", "Lior Wolf", "Yaniv Taigman"], "pdf": "/pdf/4dc73a81fc40f74ade9668bd8ff574d3bb0fc79d.pdf", "paperhash": "gafni|live_face_deidentification_in_video", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/live-face-de-identification-in-video/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087840308, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": 
["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HyldojC9t7", "original": "HJlC5oncYQ", "number": 633, "cdate": 1538087839561, "ddate": null, "tcdate": 1538087839561, "tmdate": 1683306257054, "tddate": null, "forum": "HyldojC9t7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "D2KE: From Distance to Kernel and Embedding via Random Features For Structured Inputs", "abstract": "We present a new methodology that constructs a family of \\emph{positive definite kernels} from any given dissimilarity measure on structured inputs whose elements are either real-valued time series or discrete structures such as strings, histograms, and graphs. \nOur approach, which we call D2KE (from Distance to Kernel and Embedding), draws from the literature of Random Features.\nHowever, instead of deriving random feature maps from a user-defined kernel to approximate kernel machines, we build a kernel from a random feature map, that we specify given the distance measure. \nWe further propose use of a finite number of random objects to produce a random feature embedding of each instance.\nWe provide a theoretical analysis showing that D2KE enjoys better generalizability than universal Nearest-Neighbor estimates. \nOn one hand, D2KE subsumes the widely-used \\emph{representative-set method} as a special case, and relates to the well-known \\emph{distance substitution kernel} in a limiting case. \nOn the other hand, D2KE generalizes existing \\emph{Random Features methods} applicable only to vector input representations to complex structured inputs of variable sizes. 
\nWe conduct classification experiments over such disparate domains as time series, strings, and histograms (for texts and images), for which our proposed framework compares favorably to existing distance-based learning methods in terms of both testing accuracy and computational time.", "keywords": ["Distance Kernel", "Embeddings", "Random Features", "Structured Inputs"], "authorids": ["lwu@email.wm.edu", "eyan@cs.cmu.edu", "fxu02@email.wm.edu", "pradeepr@cs.cmu.edu", "witbrock@us.ibm.com"], "authors": ["Lingfei Wu", "Ian E.H. Yen", "Fangli Xu", "Pradeep Ravikumar", "Michael J. Witbrock"], "TL;DR": "From Distance to Kernel and Embedding via Random Features For Structured Inputs", "pdf": "/pdf/feda529a5de3ef84a817cc087bb9eaf77aaddca9.pdf", "paperhash": "wu|d2ke_from_distance_to_kernel_and_embedding_via_random_features_for_structured_inputs"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087839586, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "ryfDoiR5Ym", 
"original": "ByeWKin5K7", "number": 631, "cdate": 1538087839146, "ddate": null, "tcdate": 1538087839146, "tmdate": 1683306256957, "tddate": null, "forum": "ryfDoiR5Ym", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Fatty and Skinny: A Joint Training Method of Watermark Encoder and Decoder", "abstract": "Watermarks have been used for various purposes. Recently, researchers started to look into using them for deep neural networks. Some works try to hide attack triggers on their adversarial samples when attacking neural networks and others want to watermark neural networks to prove their ownership against plagiarism. Implanting a backdoor watermark module into a neural network is getting more attention from the community. In this paper, we present a general purpose encoder-decoder joint training method, inspired by generative adversarial networks (GANs). Unlike GANs, however, our encoder and decoder neural networks cooperate to find the best watermarking scheme given data samples. In other words, we do not design any new watermarking strategy but our proposed two neural networks will find the best suited method on their own. After being trained, the decoder can be implanted into other neural networks to attack or protect them (see Appendix for their use cases and real implementations). To this end, the decoder should be very tiny in order not to incur any overhead when attached to other neural networks but at the same time provide very high decoding success rates, which is very challenging. Our joint training method successfully solves the problem and in our experiments maintain almost 100\\% encoding-decoding success rates for multiple datasets with very little modifications on data samples to hide watermarks. 
We also present several real-world use cases in Appendix.", "keywords": ["Adversarial Machine Learning", "Watermarking", "Generative Adversarial Networks"], "authorids": ["shhong@cs.umd.edu", "mmoham12@uncc.edu", "npark9@gmu.edu"], "authors": ["Sanghyun Hong", "Mahmoud Mohammadi", "Noseong Park"], "TL;DR": "We propose a novel watermark encoder-decoder neural networks. They perform a cooperative game to define their own watermarking scheme. People do not need to design watermarking methods any more.", "pdf": "/pdf/599b84fb70332752b375b408dd50c91869f727e9.pdf", "paperhash": "hong|fatty_and_skinny_a_joint_training_method_of_watermark_encoder_and_decoder"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087839176, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "H1xLsjAqtX", "original": "Sygz4Ih5FX", "number": 623, "cdate": 1538087837680, "ddate": null, "tcdate": 1538087837680, "tmdate": 1683306256753, "tddate": null, "forum": "H1xLsjAqtX", "replyto": null, "invitation": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Robust Text Classifier on Test-Time Budgets", "abstract": "In this paper, we design a generic framework for learning a robust text classification model that achieves accuracy comparable to standard full models under test-time\nbudget constraints. We take a different approach from existing methods and learn to dynamically delete a large fraction of unimportant words by a low-complexity selector such that the high-complexity classifier only needs to process a small fraction of important words. In addition, we propose a new data aggregation method to train the classifier, allowing it to make accurate predictions even on fragmented sequence of words. Our end-to-end method achieves state-of-the-art performance while its computational complexity scales linearly with the small fraction of important words in the whole corpus. Besides, a single deep neural network classifier trained by our framework can be dynamically tuned to different budget levels at inference time.", "keywords": ["Data Aggregation", "Budget Learning", "Speed Up", "Faster Inference", "Robust Classifier"], "authorids": ["rizwan@cs.ucla.edu", "tolgab@bu.edu", "kwchang@cs.ucla.edu", "srv@bu.edu"], "authors": ["Md Rizwan Parvez", "Tolga Bolukbasi", "Kai-Wei Chang", "Venkatesh Saligrama"], "TL;DR": "Modular framework for document classification and data aggregation technique for making the framework robust to various distortion, and noise and focus only on the important words. 
", "pdf": "/pdf/28ed4fdd0e03e6a6964bc29ce68b2c644a8aa4ff.pdf", "paperhash": "parvez|robust_text_classifier_on_testtime_budgets"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087837705, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SygNooCqY7", "original": "rJx6ypU5KQ", "number": 615, "cdate": 1538087836166, "ddate": null, "tcdate": 1538087836166, "tmdate": 1683306256623, "tddate": null, "forum": "SygNooCqY7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Noise-Tempered Generative Adversarial Networks", "abstract": "We present a novel method to stabilize the training of generative adversarial networks. The training stability is often undermined by the limited and low-dimensional support of the probability density function of the data samples. To address this problem we propose to simultaneously train the generative adversarial networks against different additive noise models, including the noise-free case. 
The benefits of this approach are that: 1) The case with noise added to both real and generated samples extends the support of the probability density function of the data, while not compromising the exact matching of the original data distribution, and 2) The noise-free case allows the exact matching of the original data distribution. We demonstrate our approach with both fixed additive noise and with learned noise models. We show that our approach results in a stable and well-behaved training of even the original minimax GAN formulation. Moreover, our technique can be incorporated in most modern GAN formulations and leads to a consistent improvement on several common datasets.", "keywords": [], "authorids": ["jenni@inf.unibe.ch", "paolo.favaro@inf.unibe.ch"], "authors": ["Simon Jenni", "Paolo Favaro"], "pdf": "/pdf/a2ffdb9d36b5f4255844f9b8eeabd48b517c40ef.pdf", "paperhash": "jenni|noisetempered_generative_adversarial_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087836190, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, 
"cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "ryxMjoRcYm", "original": "rkx8POn5tm", "number": 605, "cdate": 1538087834372, "ddate": null, "tcdate": 1538087834372, "tmdate": 1683306256564, "tddate": null, "forum": "ryxMjoRcYm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Logically-Constrained Neural Fitted Q-iteration", "abstract": "This paper proposes a method for efficient training of Q-function for continuous-state Markov Decision Processes (MDP), such that the traces of the resulting policies satisfy a Linear Temporal Logic (LTL) property. LTL, a modal logic, can express a wide range of time-dependent logical properties including safety and liveness. We convert the LTL property into a limit deterministic Buchi automaton with which a synchronized product MDP is constructed. The control policy is then synthesised by a reinforcement learning algorithm assuming that no prior knowledge is available from the MDP. The proposed method is evaluated in a numerical study to test the quality of the generated control policy and is compared against conventional methods for policy synthesis such as MDP abstraction (Voronoi quantizer) and approximate dynamic programming (fitted value iteration). 
", "keywords": [], "authorids": ["hosein.hasanbeig@cs.ox.ac.uk", "aabate@cs.ox.ac.uk", "kroening@cs.ox.ac.uk"], "authors": ["Mohammadhosein Hasanbeig", "Alessandro Abate", "Daniel Kroening"], "TL;DR": "As safety is becoming a critical notion in machine learning we believe that this work can act as a foundation for a number of research directions such as safety-aware learning algorithms.", "pdf": "/pdf/a5622046feae9285eddcb49e5019157b6b23df20.pdf", "paperhash": "hasanbeig|logicallyconstrained_neural_fitted_qiteration"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087834396, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJea5oAqK7", "original": "Hyez1sicYm", "number": 577, "cdate": 1538087829425, "ddate": null, "tcdate": 1538087829425, "tmdate": 1683306256462, "tddate": null, "forum": "SJea5oAqK7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "PASS: Phased Attentive State Space Modeling of Disease Progression 
Trajectories", "abstract": "Disease progression models are instrumental in predicting individual-level health\ntrajectories and understanding disease dynamics. Existing models are capable\nof providing either accurate predictions of patients\u2019 prognoses or clinically interpretable\nrepresentations of disease pathophysiology, but not both. In this paper,\nwe develop the phased attentive state space (PASS) model of disease progression,\na deep probabilistic model that captures complex representations for disease progression\nwhile maintaining clinical interpretability. Unlike Markovian state space\nmodels which assume memoryless dynamics, PASS uses an attention mechanism\nto induce \"memoryful\" state transitions, whereby repeatedly updated attention\nweights are used to focus on past state realizations that best predict future states.\nThis gives rise to complex, non-stationary state dynamics that remain interpretable\nthrough the generated attention weights, which designate the relationships between\nthe realized state variables for individual patients. PASS uses phased LSTM\nunits (with time gates controlled by parametrized oscillations) to generate the attention\nweights in continuous time, which enables handling irregularly-sampled\nand potentially missing medical observations. Experiments on data from a realworld\ncohort of patients show that PASS successfully balances the tradeoff between\naccuracy and interpretability: it demonstrates superior predictive accuracy\nand learns insightful individual-level representations of disease progression.", "keywords": [], "authorids": ["a7med3laa@hotmail.com", "mihaelaucla@gmail.com"], "authors": ["Ahmed M. 
Alaa", "Mihaela van der Schaar"], "pdf": "/pdf/f958ea21416a47303fa13f130be61dda69fa1cee.pdf", "paperhash": "alaa|pass_phased_attentive_state_space_modeling_of_disease_progression_trajectories"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087829448, "details": {"replyCount": 1, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "Skf5qiC5KQ", "original": "rylkR0q5t7", "number": 558, "cdate": 1538087826033, "ddate": null, "tcdate": 1538087826033, "tmdate": 1683306256318, "tddate": null, "forum": "Skf5qiC5KQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "A Unified View of Deep Metric Learning via Gradient Analysis", "abstract": "Loss functions play a pivotal role in deep metric learning (DML). A large variety of loss functions have been proposed in DML recently. 
However, it remains difficult to answer this question: what are the intrinsic differences among these loss functions?This paper answers this question by proposing a unified perspective to rethink deep metric loss functions. We show theoretically that most DML methods in deep metric learning, in view of gradient equivalence, are essentially weight assignment strategies of training pairs. Based on this unified view, we revisit several typical DML methods and disclose their hidden drawbacks. Moreover, we point out the key components of an effective DML approach which drives us to propose our weight assignment framework. We evaluate our method on image retrieval tasks, and show that it outperforms the state-of-the-art DML approaches by a significant margin on the CUB-200-2011, Cars-196, Stanford Online Products and In-Shop Clothes Retrieval datasets. ", "keywords": ["metric learning", "gradient equivalence", "image retrieval"], "authorids": ["xunwang@malong.com", "xinhan@malong.com", "whuang@malong.com"], "authors": ["Xun Wang", "Xintong Han", "Weilin Huang", "Dengke Dong", "Matthew R. 
Scott"], "pdf": "/pdf/dfaec45cc1c5650ffdb2e2e899e078a1ec8518ed.pdf", "paperhash": "wang|a_unified_view_of_deep_metric_learning_via_gradient_analysis"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087826058, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJlYcoCcKX", "original": "HyxNJSW5tQ", "number": 555, "cdate": 1538087825513, "ddate": null, "tcdate": 1538087825513, "tmdate": 1683306256263, "tddate": null, "forum": "SJlYcoCcKX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "KNOWLEDGE DISTILL VIA LEARNING NEURON MANIFOLD", "abstract": "Although deep neural networks show their extraordinary power in various tasks, they are not feasible for deploying such large models on embedded systems due to high computational cost and storage space limitation. 
The recent work knowledge distillation (KD) aims at transferring model knowledge from a well-trained teacher model to a small and fast student model which can significantly help extending the usage of large deep neural networks on portable platform. In this paper, we show that, by properly defining the neuron manifold of deep neuron network (DNN), we can significantly improve the performance of student DNN networks through approximating neuron manifold of powerful teacher network. To make this, we propose several novel methods for learning neuron manifold from DNN model. Empowered with neuron manifold knowledge, our experiments show the great improvement across a variety of DNN architectures and training data. Compared with other KD methods, our Neuron Manifold Transfer (NMT) has best transfer ability of the learned features.", "keywords": ["Deep Learning", "Machine Learning", "Knowledge Distill", "Model Compression"], "authorids": ["ztao@email.wm.edu", "qxia01@email.wm.edu", "liqun@cs.wm.edu"], "authors": ["Zeyi Tao", "Qi Xia", "Qun Li"], "TL;DR": "A new knowledge distill method for transfer learning", "pdf": "/pdf/28a2ef2f0e47d6462e9a1bcd5f09276ebb155ac1.pdf", "paperhash": "tao|knowledge_distill_via_learning_neuron_manifold"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087825539, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": 
["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SkgYciAqF7", "original": "rklubsL5K7", "number": 552, "cdate": 1538087824878, "ddate": null, "tcdate": 1538087824878, "tmdate": 1683306256208, "tddate": null, "forum": "SkgYciAqF7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "N/A", "abstract": "N/A", "keywords": ["N/A"], "authorids": ["youngjoon.yoo@navercorp.com"], "authors": ["N/A"], "TL;DR": "N/A", "pdf": "/pdf/3a09b1bd66b6b7949ce84b4a9fcd9120310b6fca.pdf", "paperhash": "na|na"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087824904, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": 
"BJgFcj0qKX", "original": "rylaCdlqt7", "number": 551, "cdate": 1538087824700, "ddate": null, "tcdate": 1538087824700, "tmdate": 1750551535809, "tddate": null, "forum": "BJgFcj0qKX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Stacked U-Nets: A No-Frills Approach to Natural Image Segmentation", "abstract": "Many imaging tasks require global information about all pixels in an image. Conventional bottom-up classification networks globalize information by decreasing resolution; features are pooled and down-sampled into a single output. But for semantic segmentation and object detection tasks, a network must provide higher-resolution pixel-level outputs. To globalize information while preserving resolution, many researchers propose the inclusion of sophisticated auxiliary blocks, but these come at the cost of a considerable increase in network size and computational cost. This paper proposes stacked u-nets (SUNets), which iteratively combine features from different resolution scales while maintaining resolution. SUNets leverage the information globalization power of u-nets in a deeper net- work architectures that is capable of handling the complexity of natural images. 
SUNets perform extremely well on semantic segmentation tasks using a small number of parameters.", "keywords": ["semantic segmentation", "stacked u-nets", "classification"], "authorids": ["sohilas@umd.edu", "tomg@cs.umd.edu", "pallabig@umd.edu", "lsd@umiacs.umd.edu"], "authors": ["Sohil Shah", "Pallabi Ghosh", "Larry S Davis", "Tom Goldstein"], "TL;DR": "Presents new architecture which leverages information globalization power of u-nets in a deeper networks and performs well across tasks without any bells and whistles.", "pdf": "/pdf/083bbe21a2813dea08690a92c6d24c7324860433.pdf", "paperhash": "shah|stacked_unets_a_nofrills_approach_to_natural_image_segmentation", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/stacked-u-nets-a-no-frills-approach-to/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087824727, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJeI5i0cYQ", "original": 
"BylcSjY9tQ", "number": 539, "cdate": 1538087822500, "ddate": null, "tcdate": 1538087822500, "tmdate": 1683306255985, "tddate": null, "forum": "SJeI5i0cYQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "EXPLORING DEEP LEARNING USING INFORMATION THEORY TOOLS AND PATCH ORDERING", "abstract": "We present a framework for automatically ordering image patches that enables in-depth analysis of dataset relationship to learnability of a classification task using convolutional neural network. An image patch is a group of pixels residing in a continuous area contained in the sample. Our preliminary experimental results show that an informed smart shuffling of patches at a sample level can expedite training by exposing important features at early stages of training. In addition, we conduct systematic experiments and provide evidence that CNN\u2019s generalization capabilities do not correlate with human recognizable features present in training samples. We utilized the framework not only to show that spatial locality of features within samples do not correlate with generalization, but also to expedite convergence while achieving similar generalization performance. 
Using multiple network architectures and datasets, we show that ordering image regions using mutual information measure between adjacent patches, enables CNNs to converge in a third of the total steps required to train the same network without patch ordering.", "keywords": ["CNN", "Deep Learning", "Feature Extraction", "Patch Ordering", "Convergence", "Image Classification"], "authorids": ["henok.ghebrechristos@ucdenver.edu", "gita.alaghband@ucdenver.edu"], "authors": ["Henok Ghebrechristos", "Gita Alaghband"], "TL;DR": "Develop new techniques that rely on patch reordering to enable detailed analysis of data-set relationship to training and generalization performances.", "pdf": "/pdf/cc6bba04cbc1e6d44299e7d0f2c5bcd12d1ba109.pdf", "paperhash": "ghebrechristos|exploring_deep_learning_using_information_theory_tools_and_patch_ordering"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087822527, "details": {"replyCount": 2, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rkl85oRqYX", "original": 
"HJeFwQ-5KQ", "number": 537, "cdate": 1538087822136, "ddate": null, "tcdate": 1538087822136, "tmdate": 1683306255873, "tddate": null, "forum": "rkl85oRqYX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Exploiting Invariant Structures for Compression in Neural Networks", "abstract": "Modern neural networks often require deep compositions of high-dimensional nonlinear functions (wide architecture) to achieve high test accuracy, and thus can have overwhelming number of parameters. Repeated high cost in prediction at test-time makes neural networks ill-suited for devices with constrained memory or computational power. We introduce an efficient mechanism, reshaped tensor decomposition, to compress neural networks by exploiting three types of invariant structures: periodicity, modulation and low rank. Our reshaped tensor decomposition method exploits such invariance structures using a technique called tensorization (reshaping the layers into higher-order tensors) combined with higher order tensor decompositions on top of the tensorized layers. Our compression method improves low rank approximation methods and can be incorporated to (is complementary to) most of the existing compression methods for neural networks to achieve better compression. 
Experiments on LeNet-5 (MNIST), ResNet-32 (CI- FAR10) and ResNet-50 (ImageNet) demonstrate that our reshaped tensor decomposition outperforms (5% test accuracy improvement universally on CIFAR10) the state-of-the-art low-rank approximation techniques under same compression rate, besides achieving orders of magnitude faster convergence rates.", "keywords": ["Neural Network Compression", "Low Rank Approximation", "Higher Order Tensor Decomposition"], "authorids": ["jiahaosu@terpmail.umd.edu", "jingling@cs.umd.edu", "bobby@cs.umd.edu", "furongh@cs.umd.edu"], "authors": ["Jiahao Su", "Jingling Li", "Bobby Bhattacharjee", "Furong Huang"], "TL;DR": "Compression of neural networks which improves the state-of-the-art low rank approximation techniques and is complementary to most of other compression techniques. ", "pdf": "/pdf/eaea084a6c8a756ac920494b10dd31a7077a4f07.pdf", "paperhash": "su|exploiting_invariant_structures_for_compression_in_neural_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087822167, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, 
"transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HylIcj0qFQ", "original": "S1xdjgqqK7", "number": 534, "cdate": 1538087821558, "ddate": null, "tcdate": 1538087821558, "tmdate": 1683306255745, "tddate": null, "forum": "HylIcj0qFQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Capacity of Deep Neural Networks under Parameter Quantization", "abstract": "Most deep neural networks (DNNs) require complex models to achieve high performance. Parameter quantization is widely used for reducing the implementation complexities. Previous studies on quantization were mostly based on extensive simulation using training data. We choose a different approach and attempt to measure the per-parameter capacity of DNN models and interpret the results to obtain insights on optimum quantization of parameters. This research uses artificially generated data and generic forms of fully connected DNNs, convolutional neural networks, and recurrent neural networks. We conduct memorization and classification tests to study the effects of the number and precision of the parameters on the performance. The model and the per-parameter capacities are assessed by measuring the mutual information between the input and the classified output. We also extend the memorization capacity measurement results to image classification and language modeling tasks. 
To get insight for parameter quantization when performing real tasks, the training and test performances are compared.", "keywords": ["quantization", "network capacity", "hardware implementation", "network compression"], "authorids": ["dnsgh337@snu.ac.kr", "ssh9919@snu.ac.kr", "wysung@snu.ac.kr"], "authors": ["Yoonho Boo", "Sungho Shin", "and Wonyong Sung"], "TL;DR": "We suggest the sufficient number of bits for representing weights of DNNs and the optimum bits are conservative when solving real problems.", "pdf": "/pdf/fc2db9ff2711eadf8800592969fa458c825749c6.pdf", "paperhash": "boo|capacity_of_deep_neural_networks_under_parameter_quantization"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087821585, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJxz5jRcFm", "original": "H1gsblK9Fm", "number": 516, "cdate": 1538087818192, "ddate": null, "tcdate": 1538087818192, "tmdate": 1750551535964, "tddate": null, "forum": "BJxz5jRcFm", "replyto": null, "invitation": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Tangent-Normal Adversarial Regularization for Semi-supervised Learning", "abstract": "The ever-increasing size of modern datasets combined with the difficulty of obtaining label information has made semi-supervised learning of significant practical importance in modern machine learning applications. In comparison to supervised learning, the key difficulty in semi-supervised learning is how to make full use of the unlabeled data. In order to utilize manifold information provided by unlabeled data, we propose a novel regularization called the tangent-normal adversarial regularization, which is composed by two parts. The two parts complement with each other and jointly enforce the smoothness along two different directions that are crucial for semi-supervised learning. One is applied along the tangent space of the data manifold, aiming to enforce local invariance of the classifier on the manifold, while the other is performed on the normal space orthogonal to the tangent space, intending to impose robustness on the classifier against the noise causing the observed data deviating from the underlying data manifold. Both of the two regularizers are achieved by the strategy of virtual adversarial training. 
Our method has achieved state-of-the-art performance on semi-supervised learning tasks on both artificial dataset and practical datasets.", "keywords": ["semi-supervised learning", "manifold regularization", "adversarial training"], "authorids": ["byu@pku.edu.cn", "pkuwjf@pku.edu.cn", "jwma@math.pku.edu.cn", "zhanxing.zhu@pku.edu.cn"], "authors": ["Bing Yu", "Jingfeng Wu", "Jinwen Ma", "Zhanxing Zhu"], "TL;DR": "We propose a novel manifold regularization strategy based on adversarial training, which can significantly improve the performance of semi-supervised learning.", "pdf": "/pdf/5bec4b35de99569885d09aaa34b96dbdb03f85f7.pdf", "paperhash": "yu|tangentnormal_adversarial_regularization_for_semisupervised_learning", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/tangent-normal-adversarial-regularization-for/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087818219, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 
1539179226045, "type": "note"}}} +{"id": "rkMk9j0qYm", "original": "rJx9p4qqKm", "number": 499, "cdate": 1538087815137, "ddate": null, "tcdate": 1538087815137, "tmdate": 1683306255463, "tddate": null, "forum": "rkMk9j0qYm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Explainable Adversarial Learning: Implicit Generative Modeling of Random Noise during Training for Adversarial Robustness", "abstract": "We introduce Explainable Adversarial Learning, ExL, an approach for training neural networks that are intrinsically robust to adversarial attacks. We find that the implicit generative modeling of random noise with the same loss function used during posterior maximization, improves a model's understanding of the data manifold furthering adversarial robustness. We prove our approach's efficacy and provide a simplistic visualization tool for understanding adversarial data, using Principal Component Analysis. Our analysis reveals that adversarial robustness, in general, manifests in models with higher variance along the high-ranked principal components. We show that models learnt with our approach perform remarkably well against a wide-range of attacks. Furthermore, combining ExL with state-of-the-art adversarial training extends the robustness of a model, even beyond what it is adversarially trained for, in both white-box and black-box attack scenarios.", "keywords": ["Adversarial Robustness", "PCA variance", "PCA subspace", "Generative Noise Modeling", "Adversarial attack", "Adversarial Robustness Metric"], "authorids": ["pandap@purdue.edu", "kaushik@purdue.edu"], "authors": ["Priyadarshini Panda", "Kaushik Roy"], "TL;DR": "Noise modeling at the input during discriminative training improves adversarial robustness. 
Propose PCA based evaluation metric for adversarial robustness", "pdf": "/pdf/592abe1818d0fbbb1252656299db843fae4b723d.pdf", "paperhash": "panda|explainable_adversarial_learning_implicit_generative_modeling_of_random_noise_during_training_for_adversarial_robustness"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087815164, "details": {"replyCount": 11, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1GRtj05t7", "original": "HJe22G3YFQ", "number": 494, "cdate": 1538087814246, "ddate": null, "tcdate": 1538087814246, "tmdate": 1683306255347, "tddate": null, "forum": "B1GRtj05t7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "NA", "abstract": "NA", "keywords": [], "authorids": ["v-ziclin@microsoft.com", "lizo@microsoft.com"], "authors": ["NA"], "pdf": "/pdf/9ea920f07de0fe8296a1c3d11fa6cfacca493359.pdf", "paperhash": "na|na"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": 
["ICLR.cc/2019/Conference"], "odate": 1538087814274, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJlaYi05tm", "original": "H1lrYYSqY7", "number": 485, "cdate": 1538087812557, "ddate": null, "tcdate": 1538087812557, "tmdate": 1683306255538, "tddate": null, "forum": "BJlaYi05tm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Geometry of Deep Convolutional Networks", "abstract": " We give a formal procedure for computing preimages of convolutional\n network outputs using the dual basis defined from the set of\n hyperplanes associated with the layers of the network. We point out\n the special symmetry associated with arrangements of hyperplanes of\n convolutional networks that take the form of regular\n multidimensional polyhedral cones. We discuss the efficiency of of\n large number of layers of nested cones that result from incremental\n small size convolutions in order to give a good compromise between\n efficient contraction of data to low dimensions and shaping of\n preimage manifolds. 
We demonstrate how a specific network flattens a\n non linear input manifold to an affine output manifold and discuss\n it's relevance to understanding classification properties of deep\n networks.", "keywords": ["convolutional networks", "geometry"], "authorids": ["stefanc@kth.se"], "authors": ["Stefan Carlsson"], "TL;DR": "Analysis of deep convolutional networks in terms of associated arrangement of hyperplanes", "pdf": "/pdf/c191a66d661191570a2c485e6bbe213f2621029c.pdf", "paperhash": "carlsson|geometry_of_deep_convolutional_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087812586, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1lFYoRcFm", "original": "Hkg-ERBqFm", "number": 462, "cdate": 1538087808537, "ddate": null, "tcdate": 1538087808537, "tmdate": 1683306255137, "tddate": null, "forum": "r1lFYoRcFm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Quantile Regression Reinforcement Learning 
with State Aligned Vector Rewards", "abstract": "Learning from a scalar reward in continuous action space environments is difficult and often requires millions if not billions of interactions. We introduce state aligned vector rewards, which are easily defined in metric state spaces and allow our deep reinforcement learning agent to tackle the curse of dimensionality. Our agent learns to map from action distributions to state change distributions implicitly defined in a quantile function neural network. We further introduce a new reinforcement learning technique inspired by quantile regression which does not limit agents to explicitly parameterized action distributions. Our results in high dimensional state spaces show that training with vector rewards allows our agent to learn multiple times faster than an agent training with scalar rewards.", "keywords": ["deep reinforcement learning", "quantile regression", "vector reward"], "authorids": ["richtero@ethz.ch", "wattenhofer@ethz.ch"], "authors": ["Oliver Richter", "Roger Wattenhofer"], "TL;DR": "We train with state aligned vector rewards an agent predicting state changes from action distributions, using a new reinforcement learning technique inspired by quantile regression.", "pdf": "/pdf/341834dbcdff49cfe061cb8cf82ec24315415c00.pdf", "paperhash": "richter|quantile_regression_reinforcement_learning_with_state_aligned_vector_rewards"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087808563, "details": {"replyCount": 12, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity 
will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1VmtsC5t7", "original": "HJx5gaYZFX", "number": 432, "cdate": 1538087803200, "ddate": null, "tcdate": 1538087803200, "tmdate": 1750551536133, "tddate": null, "forum": "r1VmtsC5t7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Is PGD-Adversarial Training Necessary? Alternative Training via a Soft-Quantization Network with Noisy-Natural Samples Only", "abstract": "Recent work on adversarial attack and defense suggests that projected gradient descent (PGD) is a universal $l_\\infty$ first-order attack, and PGD adversarial training can significantly improve network robustness against a wide range of first-order $l_\\infty$-bounded attacks, represented as the state-of-the-art defense method. However, an obvious weakness of PGD adversarial training is its highly-computational cost in generating adversarial samples, making it computationally infeasible for large and high-resolution real datasets such as the ImageNet dataset. In addition, recent work also has suggested a simple ``close-form'' solution to a robust model on MNIST. Therefore, a natural question raised is that is PGD adversarial training really necessary for robust defense? In this paper, surprisingly, we give a negative answer by proposing a training paradigm that is comparable to PGD adversarial training on several standard datasets, while only using noisy-natural samples. 
Specifically, we reformulate the min-max objective in PGD adversarial training by a minimization problem to minimize the original network loss plus $l_1$ norms of its gradients evaluated on the inputs (including adversarial samples). The original loss can be solved by natural training; for the $l_1$-norm loss, we propose a computationally-feasible solution by embedding a differentiable soft-quantization layer after the input layer of a network. We show formally that the soft-quantization layer trained with noisy-natural samples is an alternative approach to minimizing the $l_1$-gradient norms as in PGD adversarial training. Extensive empirical evaluations on three standard datasets including MNIST, CIFAR-10 and ImageNet show that our proposed models are comparable to PGD-adversarially-trained models under PGD and BPDA attacks using both cross-entropy and $CW_\\infty$ losses. Remarkably, our method achieves a 24X speed-up on MNIST while maintaining a comparable defensive ability, and for the first time fine-tunes a robust Imagenet model within only two days. 
Code for the experiments will be released on Github.", "keywords": [], "authorids": ["tzheng4@buffalo.edu", "cchangyou@gmail.com", "kuiren@buffalo.edu"], "authors": ["Tianhang Zheng", "Changyou Chen", "Kui Ren"], "pdf": "/pdf/b318b89968b32accae2883fc733fa51c16ff0995.pdf", "paperhash": "zheng|is_pgdadversarial_training_necessary_alternative_training_via_a_softquantization_network_with_noisynatural_samples_only", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/is-pgd-adversarial-training-necessary/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087803223, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJfWKsC5K7", "original": "rylEQEbVtm", "number": 420, "cdate": 1538087801141, "ddate": null, "tcdate": 1538087801141, "tmdate": 1683306254948, "tddate": null, "forum": "SJfWKsC5K7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": 
"Explaining Neural Networks Semantically and Quantitatively", "abstract": "This paper presents a method to explain the knowledge encoded in a convolutional neural network (CNN) quantitatively and semantically. How to analyze the specific rationale of each prediction made by the CNN presents one of key issues of understanding neural networks, but it is also of significant practical values in certain applications. In this study, we propose to distill knowledge from the CNN into an explainable additive model, so that we can use the explainable model to provide a quantitative explanation for the CNN prediction. We analyze the typical bias-interpreting problem of the explainable model and develop prior losses to guide the learning of the explainable additive model. Experimental results have demonstrated the effectiveness of our method.", "keywords": ["Network interpretability", "deep learning", "knowledge distillation", "convolutional neural networks"], "authorids": ["bridgechen@hust.edu.cn", "chenrunjin@sjtu.edu.cn", "zqs1022@sjtu.edu.cn"], "authors": ["Hao Chen", "Runjin Chen", "Quanshi Zhang"], "TL;DR": "This paper presents a method to explain the knowledge encoded in a convolutional neural network (CNN) quantitatively and semantically.", "pdf": "/pdf/bb6a3070e2e0034f27640e96c4ba817f4d4f19a7.pdf", "paperhash": "chen|explaining_neural_networks_semantically_and_quantitatively"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087801166, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be 
displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJGgFjA9FQ", "original": "SkxM3wWNY7", "number": 415, "cdate": 1538087800298, "ddate": null, "tcdate": 1538087800298, "tmdate": 1750551536209, "tddate": null, "forum": "rJGgFjA9FQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Explaining AlphaGo: Interpreting Contextual Effects in Neural Networks", "abstract": "This paper presents two methods to disentangle and interpret contextual effects that are encoded in a pre-trained deep neural network. Unlike convolutional studies that visualize image appearances corresponding to the network output or a neural activation from a global perspective, our research aims to clarify how a certain input unit (dimension) collaborates with other units (dimensions) to constitute inference patterns of the neural network and thus contribute to the network output. The analysis of local contextual effects w.r.t. certain input units is of special values in real applications. In particular, we used our methods to explain the gaming strategy of the alphaGo Zero model in experiments, and our method successfully disentangled the rationale of each move during the game.", "keywords": ["Interpretability", "Deep learning", "alphaGo"], "authorids": ["lingzenan@sjtu.edu.cn", "11612807@mail.sustc.edu.cn", "yy19970901@ucla.edu", "rqiu@tntech.edu", "sczhu@stat.ucla.edu", "zqs1022@sjtu.edu.cn"], "authors": ["Zenan Ling", "Haotian Ma", "Yu Yang", "Robert C. 
Qiu", "Song-Chun Zhu", "Quanshi Zhang"], "TL;DR": "This paper presents methods to disentangle and interpret contextual effects that are encoded in a deep neural network.", "pdf": "/pdf/313879d2d9eca2d9e7bc764b0b039c6bbda90f98.pdf", "paperhash": "ling|explaining_alphago_interpreting_contextual_effects_in_neural_networks", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/explaining-alphago-interpreting-contextual/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087800325, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "ryEquiR9KX", "original": "rJlN0BFqYm", "number": 381, "cdate": 1538087794342, "ddate": null, "tcdate": 1538087794342, "tmdate": 1683306254760, "tddate": null, "forum": "ryEquiR9KX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Applications of Gaussian Processes in Finance", "abstract": "Estimating covariances 
between financial assets plays an important role in risk management. In practice, when the sample size is small compared to the number of variables, the empirical estimate is known to be very unstable. Here, we propose a novel covariance estimator based on the Gaussian Process Latent Variable Model (GP-LVM). Our estimator can be considered as a non-linear extension of standard factor models with readily interpretable parameters reminiscent of market betas. Furthermore, our Bayesian treatment naturally shrinks the sample covariance matrix towards a more structured matrix given by the prior and thereby systematically reduces estimation errors. Finally, we discuss some financial applications of the GP-LVM model.", "keywords": ["Gaussian Processes", "Latent Variable Model", "Variational Bayes", "Stan", "Asset Pricing", "Portfolio Allocation", "Finance", "CAPM"], "authorids": ["nirwan@fias.uni-frankfurt.de", "bertschinger@fias.uni-frankfurt.de"], "authors": ["Rajbir S. Nirwan", "Nils Bertschinger"], "TL;DR": "Covariance matrix estimation of financial assets with Gaussian Process Latent Variable Models", "pdf": "/pdf/a6f5c8dafb8bfd4cef9b8ac512b360a7bcc20e3b.pdf", "paperhash": "nirwan|applications_of_gaussian_processes_in_finance"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087794366, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": 
["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJxY_oCqKQ", "original": "rJe1bw2YK7", "number": 376, "cdate": 1538087793462, "ddate": null, "tcdate": 1538087793462, "tmdate": 1683306254669, "tddate": null, "forum": "rJxY_oCqKQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "A Forensic Representation to Detect Non-Trivial Image Duplicates, and How it Applies to Semantic Segmentation", "abstract": "Manipulation and re-use of images in scientific publications is a recurring problem, at present lacking a scalable solution. Existing tools for detecting image duplication are mostly manual or semi-automated, despite the fact that generating data for a learning-based approach is straightforward, as we here illustrate. This paper addresses the problem of determining if, given two images, one is a manipulated version of the other by means of certain geometric and statistical manipulations, e.g. copy, rotation, translation, scale, perspective transform, histogram adjustment, partial erasing, and compression artifacts. We propose a solution based on a 3-branch Siamese Convolutional Neural Network. The ConvNet model is trained to map images into a 128-dimensional space, where the Euclidean distance between duplicate (respectively, unique) images is no greater (respectively, greater) than 1. Our results suggest that such an approach can serve as tool to improve surveillance of the published and in-peer-review literature for image manipulation. 
We also show that as a byproduct the network learns useful representations for semantic segmentation, with performance comparable to that of domain-specific models.", "keywords": ["metric learning", "image similarity", "image forensics", "siamese network", "semantic segmentation"], "authorids": ["cicconet@gmail.com", "elliott.hunter@gmail.com", "daverichmond@gmail.com", "daniel_wainstock@hms.harvard.edu", "mary_walsh@hms.harvard.edu"], "authors": ["M. Cicconet", "H. Elliott", "D.L. Richmond", "D. Wainstock", "M. Walsh"], "TL;DR": "A forensic metric to determine if a given image is a copy (with possible manipulation) of another image from a given dataset.", "pdf": "/pdf/89f1555b903d4767c41ec92f855c181478f4e642.pdf", "paperhash": "cicconet|a_forensic_representation_to_detect_nontrivial_image_duplicates_and_how_it_applies_to_semantic_segmentation"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087793487, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "H1xL_iR9Km", 
"original": "HkgvyzAYKm", "number": 355, "cdate": 1538087789637, "ddate": null, "tcdate": 1538087789637, "tmdate": 1683306254546, "tddate": null, "forum": "H1xL_iR9Km", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "GradMix: Multi-source Transfer across Domains and Tasks", "abstract": "The machine learning and computer vision community is witnessing an unprecedented rate of new tasks being proposed and addressed, thanks to the power of deep convolutional networks to find complex mappings from X to Y. The advent of each task often accompanies the release of a large-scale human-labeled dataset, for supervised training of the deep network. However, it is expensive and time-consuming to manually label sufficient amount of training data. Therefore, it is important to develop algorithms that can leverage off-the-shelf labeled dataset to learn useful knowledge for the target task. While previous works mostly focus on transfer learning from a single source, we study multi-source transfer across domains and tasks (MS-DTT), in a semi-supervised setting. We propose GradMix, a model-agnostic method applicable to any model trained with gradient-based learning rule. GradMix transfers knowledge via gradient descent, by weighting and mixing the gradients from all sources during training. Our method follows a meta-learning objective, by assigning layer-wise weights to the source gradients, such that the combined gradient follows the direction that can minimize the loss for a small set of samples from the target dataset. In addition, we propose to adaptively adjust the learning rate for each mini-batch based on its importance to the target task, and a pseudo-labeling method to leverage the unlabeled samples in the target domain. 
We perform experiments on two MS-DTT tasks: digit recognition and action recognition, and demonstrate the advantageous performance of the proposed method against multiple baselines.", "keywords": ["Transfer Learning", "Domain Adaptation", "Multi-source Learning"], "authorids": ["lijunnan@u.nus.edu", "ziwei-xu@comp.nus.edu.sg", "yongkang.wong@nus.edu.sg", "qzhao@cs.umn.edu", "mohan@comp.nus.edu.sg"], "authors": ["Junnan Li", "Ziwei Xu", "Yongkang Wong", "Qi Zhao", "Mohan S. Kankanhalli"], "TL;DR": "We propose a gradient-based method to transfer knowledge from multiple sources across different domains and tasks.", "pdf": "/pdf/73e74ed3a04009ef370558aa7dd1107b30affd13.pdf", "paperhash": "li|gradmix_multisource_transfer_across_domains_and_tasks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087789663, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BkeSusCcYm", "original": "BkeP-5F5tm", "number": 349, "cdate": 1538087788596, "ddate": null, "tcdate": 1538087788596, 
"tmdate": 1683306254628, "tddate": null, "forum": "BkeSusCcYm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Combining Global Sparse Gradients with Local Gradients", "abstract": "Data-parallel neural network training is network-intensive, so gradient dropping was designed to exchange only large gradients. However, gradient dropping has been shown to slow convergence. We propose to improve convergence by having each node combine its locally computed gradient with the sparse global gradient exchanged over the network. We empirically confirm with machine translation tasks that gradient dropping with local gradients approaches convergence 48% faster than non-compressed multi-node training and 28% faster compared to vanilla gradient dropping. We also show that gradient dropping with a local gradient update does not reduce the model's final quality.", "keywords": ["Distributed training", "stochastic gradient descent", "machine translation"], "authorids": ["a.fikri@ed.ac.uk", "kheafiel@inf.ed.ac.uk"], "authors": ["Alham Fikri Aji", "Kenneth Heafield"], "TL;DR": "We improve gradient dropping (a technique of only exchanging large gradients on distributed training) by incorporating local gradients while doing a parameter update to reduce quality loss and further improve the training time.", "pdf": "/pdf/628263e418804077a88ad78582f60809ff0899bb.pdf", "paperhash": "aji|combining_global_sparse_gradients_with_local_gradients"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087788622, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply 
content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1MVuoCctX", "original": "Skx3foftY7", "number": 345, "cdate": 1538087787892, "ddate": null, "tcdate": 1538087787892, "tmdate": 1683306254323, "tddate": null, "forum": "r1MVuoCctX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "MAJOR-MINOR LSTMS FOR WORD-LEVEL LANGUAGE MODEL", "abstract": "As a widely-accepted evaluation criterion, complexity has attracted more and more attention in the design of language models. The parameter count is a proxy for complexity, which is often reported and compared in research papers. In general, more parameters means better model performance, but higher complexity. Therefore, reconciling the contradiction between the complexity and the model performance is necessary. In this paper, we propose a simple method to make use of model parameters more effectively, so that the LSTM-based language models can reach better results without the cost of increasing parameters. The method constructs another small-scale LSTM with a part of parameters originally belonging to the vanilla LSTM in each layer, whose output can assist the next layer in processing the output of the vanilla LSTM. We name these two LSTMs Major Minor LSTMs. 
In experiments, we demonstrate the language model with Major Minor LSTMs surpasses the existing state-of-the-art model on Penn Treebank and WikiText-2 with fewer parameters.", "keywords": ["Language model", "LSTM", "Deep Learning", "NLP"], "authorids": ["shuangk@bupt.edu.cn", "lirui@bupt.edu.cn", "pattygu0622@bupt.edu.cn", "echo_yang@bupt.edu.cn", "jonathan.loo@uwl.ac.uk", "susen@bupt.edu.cn"], "authors": ["Kai Shuang", "Rui Li", "Mengyu Gu", "Qianqian Yang", "Jonathan", "Sen Su"], "pdf": "/pdf/687d3a13ec7c8cae60f3fdb3d7b4549f86045de5.pdf", "paperhash": "shuang|majorminor_lstms_for_wordlevel_language_model"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087787918, "details": {"replyCount": 10, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SkGMOi05FQ", "original": "SJltVUF5tX", "number": 337, "cdate": 1538087786477, "ddate": null, "tcdate": 1538087786477, "tmdate": 1750551536495, "tddate": null, "forum": "SkGMOi05FQ", "replyto": null, "invitation": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": " Generating Text through Adversarial Training using Skip-Thought Vectors", "abstract": "In the past few years, various advancements have been made in generative models owing to the formulation of Generative Adversarial Networks (GANs). GANs have been shown to perform exceedingly well on a wide variety of tasks pertaining to image generation and style transfer. In the field of Natural Language Processing, word embeddings such as word2vec and GLoVe are state-of-the-art methods for applying neural network models on textual data. Attempts have been made for utilizing GANs with word embeddings for text generation. This work presents an approach to text generation using Skip-Thought sentence embeddings in conjunction with GANs based on gradient penalty functions and f-measures. The results of using sentence embeddings with GANs for generating text conditioned on input information are comparable to the approaches where word embeddings are used. 
", "keywords": ["Natural Language Generation", "Computation and Language", "Machine Learning", "Generative Adversarial Networks", "Sentence Embeddings"], "authorids": ["afrozsahamad@gmail.com"], "authors": ["Afroz Ahamad"], "TL;DR": "Generating text using sentence embeddings from Skip-Thought Vectors with the help of Generative Adversarial Networks.", "pdf": "/pdf/3bcb1f3000c14da180bde44e4c06e4bae7c4b14f.pdf", "paperhash": "ahamad|generating_text_through_adversarial_training_using_skipthought_vectors", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 4 code implementations](https://www.catalyzex.com/paper/generating-text-through-adversarial-training/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087786502, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJlGdsC9Ym", "original": "SJxsYwD5Y7", "number": 332, "cdate": 1538087785621, "ddate": null, "tcdate": 1538087785621, "tmdate": 1683306254135, "tddate": null, "forum": "rJlGdsC9Ym", 
"replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Learning of Sophisticated Curriculums by viewing them as Graphs over Tasks", "abstract": "Curriculum learning consists in learning a difficult task by first training on an easy version of it, then on more and more difficult versions and finally on the difficult task. To make this learning efficient, given a curriculum and the current learning state of an agent, we need to find what are the good next tasks to train the agent on.\nTeacher-Student algorithms assume that the good next tasks are the ones on which the agent is making the fastest progress or digress. We first simplify and improve them. However, two problematic situations where the agent is mainly trained on tasks it can't learn yet or it already learnt may occur.\nTherefore, we introduce a new algorithm using min max ordered curriculums that assumes that the good next tasks are the ones that are learnable but not learnt yet. It outperforms Teacher-Student algorithms on small curriculums and significantly outperforms them on sophisticated ones with numerous tasks.", "keywords": ["learning", "curriculum learning", "reinforcement learning"], "authorids": ["lcswillems@gmail.com", "yoshua.bengio@umontreal.ca"], "authors": ["Lucas Willems", "Yoshua Bengio"], "TL;DR": "We present a new algorithm for learning by curriculum based on the notion of mastering rate that outperforms previous algorithms.", "pdf": "/pdf/8e2b2c2206bfb2092e6edbd9ec5d9cfca1308db4.pdf", "paperhash": "willems|learning_of_sophisticated_curriculums_by_viewing_them_as_graphs_over_tasks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087785647, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, 
"super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJN6DiAcKQ", "original": "HygNL6ucKQ", "number": 307, "cdate": 1538087781269, "ddate": null, "tcdate": 1538087781269, "tmdate": 1683306254031, "tddate": null, "forum": "HJN6DiAcKQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Engaging Image Captioning Via Personality", "abstract": "Standard image captioning tasks such as COCO and Flickr30k are factual, neutral in tone and (to a human) state the obvious (e.g., \u201ca man playing a guitar\u201d). While such tasks are useful to verify that a machine understands the content of an image, they are not engaging to humans as captions. With this in mind we define a new task, Personality-Captions, where the goal is to be as engaging to humans as possible by incorporating controllable style and personality traits.We collect and release a large dataset of 201,858 of such captions conditioned over 215 possible traits. We build models that combine existing work from (i) sentence representations (Mazar\u00e9 et al., 2018) with Transformers trained on 1.7 billion dialogue examples; and (ii) image representations (Mahajan et al., 2018) with ResNets trained on 3.5 billion social media images. 
We obtain state-of-the-art performance on Flickr30k and COCO, and strong performance on our new task. Finally, online evaluations validate that our task and models are engaging to humans, with our best model close to human performance.", "keywords": ["image", "captioning", "captions", "vision", "language"], "authorids": ["kshuster@fb.com", "samuelhumeau@fb.com", "hexianghu@fb.com", "abordes@fb.com", "jaseweston@gmail.com"], "authors": ["Kurt Shuster", "Samuel Humeau", "Hexiang Hu", "Antoine Bordes", "Jason Weston"], "TL;DR": "We develop engaging image captioning models conditioned on personality that are also state of the art on regular captioning tasks.", "pdf": "/pdf/54bbd4c01aa9aa7912080d66be76b904448d6fa1.pdf", "paperhash": "shuster|engaging_image_captioning_via_personality"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087781296, "details": {"replyCount": 5, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "ryxnDoRcK7", "original": "ryx8T5_9Ym", "number": 298, "cdate": 1538087779672, 
"ddate": null, "tcdate": 1538087779672, "tmdate": 1750551536615, "tddate": null, "forum": "ryxnDoRcK7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Estimating Heterogeneous Treatment Effects Using Neural Networks With The Y-Learner", "abstract": "We develop the Y-learner for estimating heterogeneous treatment effects in experimental and observational studies. The Y-learner is designed to leverage the abilities of neural networks to optimize multiple objectives and continually update, which allows for better pooling of underlying feature information between treatment and control groups. We evaluate the Y-learner on three test problems: (1) A set of six simulated data benchmarks from the literature. (2) A real-world large-scale experiment on voter persuasion. (3) A task from the literature that estimates artificially generated treatment effects on MNIST didgits. The Y-learner achieves state of the art results on two of the three tasks. On the MNIST task, it gets the second best results. ", "keywords": ["causal inference", "CATE estimation", "ITE", "deep learning"], "authorids": ["bstadie@berkeley.edu", "srk@berkeley.edu", "nikitavemuri@berkeley.edu", "sekhon@berkeley.edu"], "authors": ["Bradly C. Stadie", "S\u00f6ren R. K\u00fcnzel", "Nikita Vemuri", "Jasjeet S. Sekhon"], "TL;DR": "We develop a CATE estimation strategy that takes advantage some of the intriguing properties of neural networks. 
", "pdf": "/pdf/3bed67b4ba82c3df9c5b0f860ba6202fa25556ac.pdf", "paperhash": "stadie|estimating_heterogeneous_treatment_effects_using_neural_networks_with_the_ylearner", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/estimating-heterogeneous-treatment-effects/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087779697, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SkeiPsAqK7", "original": "rklCRcYQYm", "number": 292, "cdate": 1538087778618, "ddate": null, "tcdate": 1538087778618, "tmdate": 1683306253804, "tddate": null, "forum": "SkeiPsAqK7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "NA", "abstract": "NA", "keywords": [], "authorids": ["dai.nguyen@monash.edu", "tu.dinh.nguyen@monash.edu", "dinh.phung@monash.edu"], "authors": ["Dai Quoc Nguyen", "Tu Dinh Nguyen", "Dinh Phung"], "pdf": 
"/pdf/d61e34d57492be8f8d75ae9004af16d2c769666f.pdf", "paperhash": "nguyen|na"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087778646, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rkzcvoA9YX", "original": "H1gEvvdcFQ", "number": 288, "cdate": 1538087777908, "ddate": null, "tcdate": 1538087777908, "tmdate": 1683306253637, "tddate": null, "forum": "rkzcvoA9YX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Few-Shot Learning by Exploiting Object Relation", "abstract": "\nFew-shot learning trains image classifiers over datasets with few examples per category. \nIt poses challenges for the optimization algorithms, which typically require many examples to fine-tune the model parameters for new categories. \nDistance-learning-based approaches avoid the optimization issue by embedding the images into a metric space and applying the nearest neighbor classifier for new categories. 
In this paper, we propose to exploit the object-level relation to learn the image relation feature, which is converted into a distance directly.\nFor a new category, even though its images are not seen by the model, some objects may appear in the training images. Hence, object-level relation is useful for inferring the relation of images from unseen categories. Consequently, our model generalizes well for new categories without fine-tuning.\nExperimental results on benchmark datasets show that our approach outperforms state-of-the-art methods.", "keywords": ["few-shot learning", "relation learning"], "authorids": ["liangqu.long@gmail.com", "wangwei@comp.nus.edu.sg", "jungel2star@gmail.com", "meihui_zhang@bit.edu.cn", "linqian@comp.nus.edu.sg"], "authors": ["Liangqu Long", "Wei Wang", "Jun Wen", "Meihui Zhang", "Qian Lin"], "TL;DR": "Few-shot learning by exploiting the object-level relation to learn the image-level relation (similarity)", "pdf": "/pdf/3f6bb2bf7a11626410dd7453f5b5b16d782af08c.pdf", "paperhash": "long|fewshot_learning_by_exploiting_object_relation"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087777935, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": 
["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BkVvwj0qFm", "original": "B1gkIeHFdQ", "number": 273, "cdate": 1538087775246, "ddate": null, "tcdate": 1538087775246, "tmdate": 1683306253584, "tddate": null, "forum": "BkVvwj0qFm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Geometric Operator Convolutional Neural Network", "abstract": "The Convolutional Neural Network (CNN) has been successfully applied in many fields during recent decades; however it lacks the ability to utilize prior domain knowledge when dealing with many realistic problems. We present a framework called Geometric Operator Convolutional Neural Network (GO-CNN) that uses domain knowledge, wherein the kernel of the first convolutional layer is replaced with a kernel generated by a geometric operator function. This framework integrates many conventional geometric operators, which allows it to adapt to a diverse range of problems. Under certain conditions, we theoretically analyze the convergence and the bound of the generalization errors between GO-CNNs and common CNNs. Although the geometric operator convolution kernels have fewer trainable parameters than common convolution kernels, the experimental results indicate that GO-CNN performs more accurately than common CNN on CIFAR-10/100. 
Furthermore, GO-CNN reduces dependence on the amount of training examples and enhances adversarial stability.", "keywords": ["Convolutional Neural Network", "Geometric Operator", "Image Classification", "Theoretical Analysis"], "authorids": ["yangma@mail.ustc.edu.cn", "seeing@mail.ustc.edu.cn", "yangzw@ustc.edu.cn"], "authors": ["Yangling Ma", "Yixin Luo", "Zhouwang Yang"], "TL;DR": "Traditional image processing algorithms are combined with Convolutional Neural Networks\uff0ca new neural network.", "pdf": "/pdf/a85e09ffc3461f9c88386b6fe2d2be5daeb40710.pdf", "paperhash": "ma|geometric_operator_convolutional_neural_network"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087775272, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJgHwi0ctm", "original": "r1ecjkI5t7", "number": 258, "cdate": 1538087772566, "ddate": null, "tcdate": 1538087772566, "tmdate": 1683306253493, "tddate": null, "forum": "HJgHwi0ctm", "replyto": null, "invitation": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "NA", "abstract": "NA", "keywords": [], "authorids": ["masoud.faraki@monash.edu", "m.baktashmotlagh@qut.edu.au", "tom.drummond@monash.edu", "mathieu.salzmann@epfl.ch"], "authors": ["Masoud Faraki", "Mahsa Baktashmotlagh", "Tom Drummond", "Mathieu Salzmann"], "pdf": "/pdf/c5b6a0766f591eb9b067f32e9af1469b9274338c.pdf", "paperhash": "faraki|na"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087772591, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rklNwjCcYm", "original": "BJeeD5WFt7", "number": 257, "cdate": 1538087772385, "ddate": null, "tcdate": 1538087772385, "tmdate": 1683306253382, "tddate": null, "forum": "rklNwjCcYm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Understanding and Improving Sequence-Labeling NER with Self-Attentive LSTMs", "abstract": "This paper improves upon the line of research that formulates named entity 
recognition (NER) as a sequence-labeling problem. We use so-called black-box long short-term memory (LSTM) encoders to achieve state-of-the-art results while providing insightful understanding of what the auto-regressive model learns with a parallel self-attention mechanism. Specifically, we decouple the sequence-labeling problem of NER into entity chunking, e.g., Barack_B Obama_E was_O elected_O, and entity typing, e.g., Barack_PERSON Obama_PERSON was_NONE elected_NONE, and analyze how the model learns to, or has difficulties in, capturing text patterns for each of the subtasks. The insights we gain then lead us to explore a more sophisticated deep cross-Bi-LSTM encoder, which proves better at capturing global interactions given both empirical results and a theoretical justification.", "keywords": ["interpretability", "sequence labeling", "named entity recognition", "LSTM", "attention"], "authorids": ["jacobvsdanniel@iis.sinica.edu.tw", "ma@iis.sinica.edu.tw"], "authors": ["Peng-Hsuan Li", "Wei-Yun Ma"], "TL;DR": "We provide insightful understanding of sequence-labeling NER and propose to use two types of cross structures, both of which bring theoretical and empirical improvements.", "pdf": "/pdf/3c02dfa85c4088bf099ad163894f0f757eace74d.pdf", "paperhash": "li|understanding_and_improving_sequencelabeling_ner_with_selfattentive_lstms"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087772411, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": 
["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJEGwo0cFX", "original": "rylrGRUctm", "number": 246, "cdate": 1538087770423, "ddate": null, "tcdate": 1538087770423, "tmdate": 1683306253298, "tddate": null, "forum": "rJEGwo0cFX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "An Attention-Based Model for Learning Dynamic Interaction Networks", "abstract": "While machine learning models achieve human-comparable performance on sequential data, exploiting structured knowledge is still a challenging problem. Spatio-temporal graphs have been proved to be a useful tool to abstract interaction graphs and previous works exploits carefully designed feed-forward architecture to preserve such structure. We argue to scale such network design to real-world problem, a model needs to automatically learn a meaningful representation of the possible relations. Learning such interaction structure is not trivial: on the one hand, a model has to discover the hidden relations between different problem factors in an unsupervised way; on the other hand, the mined relations have to be interpretable. \n\nIn this paper, we propose an attention module able to project a graph sub-structure in a fixed size embedding, preserving the influence that the neighbours exert on a given vertex. 
On a comprehensive evaluation done on real-world as well as toy task, we found our model competitive against strong baselines.", "keywords": ["dynamic networks", "interaction graphs", "attention model"], "authorids": ["sandro001@e.ntu.edu.sg", "vincent.zheng@adsc-create.edu.sg", "hongyun.c@adsc.com.sg", "cambria@ntu.edu.sg"], "authors": ["Sandro Cavallari", "Vincent W Zheng", "Hongyun Cai", "Erik Cambria"], "TL;DR": "A graph neural network able to automatically learn and leverage a dynamic interactive graph structure", "pdf": "/pdf/8ea5a546c6f09eaae2aa017c0140f80798606ae7.pdf", "paperhash": "cavallari|an_attentionbased_model_for_learning_dynamic_interaction_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087770448, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HkxzDiAcK7", "original": "ryxcxEDcF7", "number": 242, "cdate": 1538087769709, "ddate": null, "tcdate": 1538087769709, "tmdate": 1683306253186, "tddate": null, "forum": "HkxzDiAcK7", "replyto": null, 
"invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Classification of Building Noise Type/Position via Supervised Learning", "abstract": "This paper presents noise type/position classification of various impact noises generated in a building which is a serious conflict issue in apartment complexes. For this study, a collection of floor impact noise dataset is recorded with a single microphone. Noise types/positions are selected based on a report by the Floor Management Center under Korea Environmental Corporation. Using a convolutional neural networks based classifier, the impact noise signals converted to log-scaled Mel-spectrograms are classified into noise types or positions. Also, our model is evaluated on a standard environmental sound dataset ESC-50 to show extensibility on environmental sound classification.\n", "keywords": ["impact noise", "noise type classification", "noise position classification", "convolutional neural networks", "transfer learning"], "authorids": ["its_me_chy@snu.ac.kr", "coupon3@snu.ac.kr", "tl7qns7ch@snu.ac.kr", "wseong@snu.ac.kr"], "authors": ["Hwiyong Choi", "Haesang Yang", "Seungjun Lee", "Woojae Seong"], "TL;DR": "This paper presents noise type/position classification of various impact noises generated in a building which is a serious conflict issue in apartment complexes", "pdf": "/pdf/1116ed60e7f575df2bd31ccd7f548e024328ba78.pdf", "paperhash": "choi|classification_of_building_noise_typeposition_via_supervised_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087769733, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be 
allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rkelDoCqFX", "original": "H1x3qw8ut7", "number": 232, "cdate": 1538087767901, "ddate": null, "tcdate": 1538087767901, "tmdate": 1750551536912, "tddate": null, "forum": "rkelDoCqFX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Transfer Learning via Unsupervised Task Discovery for Visual Question Answering", "abstract": "We study how to leverage off-the-shelf visual and linguistic data to cope with out-of-vocabulary answers in visual question answering. Existing large-scale visual data with annotations such as image class labels, bounding boxes and region descriptions are good sources for learning rich and diverse visual concepts. However, it is not straightforward how the visual concepts should be captured and transferred to visual question answering models due to missing link between question dependent answering models and visual data without question or task specification. We tackle this problem in two steps: 1) learning a task conditional visual classifier based on unsupervised task discovery and 2) transferring and adapting the task conditional visual classifier to visual question answering models. Specifically, we employ linguistic knowledge sources such as structured lexical database (e.g. 
Wordnet) and visual descriptions for unsupervised task discovery, and adapt a learned task conditional visual classifier to answering unit in a visual question answering model. We empirically show that the proposed algorithm generalizes to unseen answers successfully using the knowledge transferred from the visual data.", "keywords": [], "authorids": ["shgusdngogo@postech.ac.kr", "carpedm20@gmail.com", "choco1916@postech.ac.kr", "bhhan@snu.ac.kr"], "authors": ["Hyeonwoo Noh", "Taehoon Kim", "Jonghwan Mun", "Bohyung Han"], "pdf": "/pdf/4404c4f77da8522419c85bc9325729d063c50690.pdf", "paperhash": "noh|transfer_learning_via_unsupervised_task_discovery_for_visual_question_answering", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/transfer-learning-via-unsupervised-task/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087767926, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJz1vo0cYX", 
"original": "SJlcuGf5KQ", "number": 227, "cdate": 1538087767014, "ddate": null, "tcdate": 1538087767014, "tmdate": 1683306252973, "tddate": null, "forum": "HJz1vo0cYX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Confidence Calibration in Deep Neural Networks through Stochastic Inferences", "abstract": "We propose a generic framework to calibrate accuracy and confidence (score) of a prediction through stochastic inferences in deep neural networks. We first analyze relation between variation of multiple model parameters for a single example inference and variance of the corresponding prediction scores by Bayesian modeling of stochastic regularization. Our empirical observation shows that accuracy and score of a prediction are highly correlated with variance of multiple stochastic inferences given by stochastic depth or dropout. Motivated by these facts, we design a novel variance-weighted confidence-integrated loss function that is composed of two cross-entropy loss terms with respect to ground-truth and uniform distribution, which are balanced by variance of stochastic prediction scores. The proposed loss function enables us to learn deep neural networks that predict confidence calibrated scores using a single inference. 
Our algorithm presents outstanding confidence calibration performance and improves classification accuracy with two popular stochastic regularization techniques---stochastic depth and dropout---in multiple models and datasets; it alleviates overconfidence issue in deep neural networks significantly by training networks to achieve prediction accuracy proportional to confidence of prediction.", "keywords": ["Variance-Weighted Confidence-Integrated loss", "Confidence Calibration", "Stochastic Regularization", "Stochastic Inferences"], "authorids": ["seonguk@snu.ac.kr", "hsseo@postech.ac.kr", "bhhan@snu.ac.kr"], "authors": ["Seonguk Seo", "Paul Hongsuck Seo", "Bohyung Han"], "TL;DR": "We propose a framework to learn confidence-calibrated networks by designing a novel loss function that incorporates predictive uncertainty estimated through stochastic inferences.", "pdf": "/pdf/ff5178f8d59841291e2b8cd6143fa410cb20757f.pdf", "paperhash": "seo|confidence_calibration_in_deep_neural_networks_through_stochastic_inferences"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087767041, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": 
["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "H1eRIoA5Y7", "original": "SyxxVUUqtm", "number": 219, "cdate": 1538087765601, "ddate": null, "tcdate": 1538087765601, "tmdate": 1683306252949, "tddate": null, "forum": "H1eRIoA5Y7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Low-Cost Parameterizations of Deep Convolutional Neural Networks", "abstract": "Convolutional Neural Networks (CNNs) filter the input data using a series of spatial convolution operators with compactly supported stencils and point-wise nonlinearities.\nCommonly, the convolution operators couple features from all channels.\nFor wide networks, this leads to immense computational cost in the training of and prediction with CNNs.\nIn this paper, we present novel ways to parameterize the convolution more efficiently, aiming to decrease the number of parameters in CNNs and their computational complexity.\nWe propose new architectures that use a sparser coupling between the channels and thereby reduce both the number of trainable weights and the computational cost of the CNN.\nOur architectures arise as new types of residual neural network (ResNet) that can be seen as discretizations of a Partial Differential Equations (PDEs) and thus have predictable theoretical properties. Our first architecture involves a convolution operator with a special sparsity structure, and is applicable to a large class of CNNs. Next, we present an architecture that can be seen as a discretization of a diffusion reaction PDE, and use it with three different convolution operators. 
We outline in our experiments that the proposed architectures, although considerably reducing the number of trainable weights, yield comparable accuracy to existing CNNs that are fully coupled in the channel dimension.\n", "keywords": ["Deep Learning", "Classification", "Partial Differential Equations"], "authorids": ["erant@bgu.ac.il", "lruthotto@emory.edu", "sharmic@post.bgu.ac.il", "sapirza@post.bgu.ac.il", "ehaber@eos.ubc.ca"], "authors": ["Eran Treister", "Lars Ruthotto", "Michal Sharoni", "Sapir Zafrani", "Eldad Haber"], "TL;DR": "This paper introduces efficient and economic parametrizations of convolutional neural networks motivated by partial differential equations ", "pdf": "/pdf/d1d189e0c98c5097bf17c0bef05d88e749dd8937.pdf", "paperhash": "treister|lowcost_parameterizations_of_deep_convolutional_neural_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087765628, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SJlp8sA5Y7", "original": "BkgbI1P_K7", 
"number": 215, "cdate": 1538087764893, "ddate": null, "tcdate": 1538087764893, "tmdate": 1683306252901, "tddate": null, "forum": "SJlp8sA5Y7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "An Efficient Network for Predicting Time-Varying Distributions", "abstract": "While deep neural networks have achieved groundbreaking prediction results in many tasks, there is a class of data where existing architectures are not optimal -- sequences of probability distributions. Performing forward prediction on sequences of distributions has many important applications. However, there are two main challenges in designing a network model for this task. First, neural networks are unable to encode distributions compactly as each node encodes just a real value. A recent work of Distribution Regression Network (DRN) solved this problem with a novel network that encodes an entire distribution in a single node, resulting in improved accuracies while using much fewer parameters than neural networks. However, despite its compact distribution representation, DRN does not address the second challenge, which is the need to model time dependencies in a sequence of distributions. In this paper, we propose our Recurrent Distribution Regression Network (RDRN) which adopts a recurrent architecture for DRN. The combination of compact distribution representation and shared weights architecture across time steps makes RDRN suitable for modeling the time dependencies in a distribution sequence. 
Compared to neural networks and DRN, RDRN achieves the best prediction performance while keeping the network compact.", "keywords": ["Distribution regression", "Distribution sequence", "Forward prediction"], "authorids": ["koukl@comp.nus.edu.sg", "leehk@bii.a-star.edu.sg", "ngtk@comp.nus.edu.sg", "jorges@nus.edu.sg"], "authors": ["Connie Kou", "Hwee Kuan Lee", "Teck Khim Ng", "Jorge Sanz"], "TL;DR": "We propose an efficient recurrent network model for forward prediction on time-varying distributions.", "pdf": "/pdf/f899d9e4fdefc89604a3b354253217f4dbc35b11.pdf", "paperhash": "kou|an_efficient_network_for_predicting_timevarying_distributions"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087764918, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJEhIjA9tQ", "original": "HklfIZ8qYm", "number": 212, "cdate": 1538087764357, "ddate": null, "tcdate": 1538087764357, "tmdate": 1683306252703, "tddate": null, "forum": "HJEhIjA9tQ", "replyto": null, "invitation": 
"ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Encoder Discriminator Networks for Unsupervised Representation Learning", "abstract": "Learning representations of data samples in an unsupervised way is needed whenever computers have to reason about unlabeled data. Applications range from compressing and denoising data to super-resolution, generating new samples from a given sample distribution and much more.\nIn this work, we use information entropy and a little game to motivate a new encoder discriminator architecture in order to learn unsupervised latent representations. Inspired by the game \"Taboo\", we train an encoder network to generate a meaningful representation of one particular sample of a dataset. Using this description, a discriminator network then has to retrieve the same sample from the whole dataset. We show that learning in this manner on many different samples repeatedly minimizes the information entropy given the latent description and, thus, forces the encoder network to make precise descriptions that can be interpreted by the discriminator.\nWe provide first results of this method on the MNIST and the Fashion MNIST dataset.", "keywords": ["representation learning", "unsupervised", "encoder discriminator"], "authorids": ["nils.wandel@ais.uni-bonn.de"], "authors": ["Nils Wandel"], "pdf": "/pdf/650ff29cd475b17a3b661d645286851874829a11.pdf", "paperhash": "wandel|encoder_discriminator_networks_for_unsupervised_representation_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087764384, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the 
reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1EiIsCctm", "original": "HJl35hBqFQ", "number": 207, "cdate": 1538087763475, "ddate": null, "tcdate": 1538087763475, "tmdate": 1683306252877, "tddate": null, "forum": "B1EiIsCctm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Improving Gaussian mixture latent variable model convergence with Optimal Transport", "abstract": "Generative models with both discrete and continuous latent variables are highly motivated by the structure of many real-world data sets. They present, however, subtleties in training often manifesting in the discrete latent variable not being leveraged. In this paper, we show why such models struggle to train using traditional log-likelihood maximization, and that they are amenable to training using the Optimal Transport framework of Wasserstein Autoencoders. We find our discrete latent variable to be fully leveraged by the model when trained, without any modifications to the objective function or significant fine tuning. Our model generates comparable samples to other approaches while using relatively simple neural networks, since the discrete latent variable carries much of the descriptive burden. 
Furthermore, the discrete latent provides significant control over generation.", "keywords": ["optimal transport", "wasserstein autoencoder", "variational autoencoder", "latent variable modeling", "generative modeling", "discrete latent variables"], "authorids": ["benoit.gaujac.16@ucl.ac.uk", "ilya@asidatascience.com", "david.barber@ucl.ac.uk"], "authors": ["Benoit Gaujac", "Ilya Feige", "David Barber"], "TL;DR": "This paper shows that the Wasserstein distance objective enables the training of latent variable models with discrete latents in a case where the Variational Autoencoder objective fails to do so.", "pdf": "/pdf/1b2588ea56c81ffda375c62ad930684b3f6dfaec.pdf", "paperhash": "gaujac|improving_gaussian_mixture_latent_variable_model_convergence_with_optimal_transport"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087763500, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BkMqUiA5KX", "original": "BkeKhUr9Ym", "number": 200, "cdate": 1538087762160, "ddate": null, 
"tcdate": 1538087762160, "tmdate": 1683306252535, "tddate": null, "forum": "BkMqUiA5KX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Improving latent variable descriptiveness by modelling rather than ad-hoc factors", "abstract": "Powerful generative models, particularly in Natural Language Modelling, are commonly trained by maximizing a variational lower bound on the data log likelihood. These models often suffer from poor use of their latent variable, with ad-hoc annealing factors used to encourage retention of information in the latent variable. We discuss an alternative and general approach to latent variable modelling, based on an objective that encourages a perfect reconstruction by tying a stochastic autoencoder with a variational autoencoder (VAE). This ensures by design that the latent variable captures information about the observations, whilst retaining the ability to generate well. Interestingly, although our model is fundamentally different to a VAE, the lower bound attained is identical to the standard VAE bound but with the addition of a simple pre-factor; thus, providing a formal interpretation of the commonly used, ad-hoc pre-factors in training VAEs.", "keywords": ["generative modelling", "latent variable modelling", "variational autoencoders", "variational inference", "natural language processing"], "authorids": ["amansbridge@turing.ac.uk", "roberto.fierimonte@gmail.com", "ilya@asidatascience.com", "david.barber@ucl.ac.uk"], "authors": ["Alex Mansbridge", "Roberto Fierimonte", "Ilya Feige", "David Barber"], "TL;DR": "This paper introduces a novel generative modelling framework that avoids latent-variable collapse and clarifies the use of certain ad-hoc factors in training Variational Autoencoders.", "pdf": "/pdf/2fb1e8f6460e200f71d1449c08d1be54029a421a.pdf", "paperhash": "mansbridge|improving_latent_variable_descriptiveness_by_modelling_rather_than_adhoc_factors"}, "signatures": 
["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087762187, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rkGqLoR5tX", "original": "r1xG37H9KX", "number": 199, "cdate": 1538087761974, "ddate": null, "tcdate": 1538087761974, "tmdate": 1683306252435, "tddate": null, "forum": "rkGqLoR5tX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "ODIN: Outlier Detection In Neural Networks", "abstract": "Adoption of deep learning in safety-critical systems raise the need for understanding what deep neural networks do not understand. Several methodologies to estimate model uncertainty have been proposed, but these methodologies constrain either how the neural network is trained or constructed. We present Outlier Detection In Neural networks (ODIN), an assumption-free method for detecting outlier observations during prediction, based on principles widely used in manufacturing process monitoring. 
By using a linear approximation of the hidden layer manifold, we add prediction-time outlier detection to models after training without altering architecture or training. We demonstrate that ODIN efficiently detect outliers during prediction on Fashion-MNIST, ImageNet-synsets and speech command recognition.", "keywords": ["Outlier Detection", "Model Uncertainty", "Safety"], "authorids": ["rickard.sjoegren@sartorius-stedim.com", "johan.trygg@sartorius-stedim.com"], "authors": ["Rickard Sj\u00f6gren", "Johan Trygg"], "TL;DR": "An add-on method for deep learning to detect outliers during prediction-time", "pdf": "/pdf/b9cdf844e8554e0805173a7fe19028e20a74f52d.pdf", "paperhash": "sj\u00f6gren|odin_outlier_detection_in_neural_networks"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087762001, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rygPUoR9YQ", "original": "Skx-c3M9K7", "number": 183, "cdate": 1538087759023, "ddate": null, "tcdate": 1538087759023, "tmdate": 
1750551537108, "tddate": null, "forum": "rygPUoR9YQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Compositional GAN: Learning Conditional Image Composition", "abstract": "Generative Adversarial Networks (GANs) can produce images of surprising complexity and realism, but are generally structured to sample from a single latent source ignoring the explicit spatial interaction between multiple entities that could be present in a scene. Capturing such complex interactions between different objects in the world, including their relative scaling, spatial layout, occlusion, or viewpoint transformation is a challenging problem. In this work, we propose to model object composition in a GAN framework as a self-consistent composition-decomposition network. Our model is conditioned on the object images from their marginal distributions and can generate a realistic image from their joint distribution. We evaluate our model through qualitative experiments and user evaluations in scenarios when either paired or unpaired examples for the individual object images and the joint scenes are given during training. 
Our results reveal that the learned model captures potential interactions between the two object domains given as input to output new instances of composed scene at test time in a reasonable fashion.", "keywords": ["Image Composition", "GAN", "Conditional Image generation"], "authorids": ["sazadi@berkeley.edu", "pathak@berkeley.edu", "sayna@berkeley.edu", "trevor@eecs.berkeley.edu"], "authors": ["Samaneh Azadi", "Deepak Pathak", "Sayna Ebrahimi", "Trevor Darrell"], "TL;DR": "We develop a novel approach to model object compositionality in images in a GAN framework.", "pdf": "/pdf/2857583b841d73d9bafd705a55f1de5c4a26810a.pdf", "paperhash": "azadi|compositional_gan_learning_conditional_image_composition", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/compositional-gan-learning-conditional-image/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087759048, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": 
"note"}}} +{"id": "S1eL8i0cFm", "original": "HkelP9CFK7", "number": 176, "cdate": 1538087757774, "ddate": null, "tcdate": 1538087757774, "tmdate": 1683306252232, "tddate": null, "forum": "S1eL8i0cFm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Cosine similarity-based Adversarial process", "abstract": "An adversarial process between two deep neural networks is a promising approach to train robust networks. In this study, we propose a framework for training networks that eliminates subsidiary information via the adversarial process. The objective of the proposed framework is to train a primary model that is robust to existing subsidiary information. This primary model can be used for various recognition tasks, such as digit recognition and speaker identification. Subsidiary information refers to the factors that might decrease the performance of the primary model such as channel information in speaker recognition and noise information in digit recognition.\nOur proposed framework comprises two discriminative models for the primary and subsidiary task, as well as an encoder network for feature representation. A subsidiary task is an operation associated with subsidiary information such as identifying the noise type. The discriminative model for the subsidiary task is trained for modeling the dependency of subsidiary class labels on codes from the encoder. Therefore, we expect that subsidiary information could be eliminated by training the encoder to reduce the dependency between the class labels and codes. In order to do so, we train the weight parameters of the subsidiary model; then, we develop the codes and the parameters of subsidiary model to make them orthogonal. For this purpose, we design a loss function to train the encoder based on cosine similarity between the weight parameters of the subsidiary model and codes. 
Finally, the proposed framework involves repeatedly performing the adversarial process of modeling the subsidiary information and eliminating it. Furthermore, we discuss possible applications of the proposed framework: reducing channel information for speaker identification and domain information for unsupervised domain adaptation. ", "keywords": ["adversarial process", "cosine similarity", "speaker identification", "domain adaptation"], "authorids": ["zhasgone@naver.com", "shimhyejin930615@gmail.com", "aberforth19@naver.com", "heisco@hanmail.net", "ysh901108@naver.com", "hjyu@uos.ac.kr"], "authors": ["Hee-Soo Heo", "Hye-Jin Shim", "Jee-Weon Jung", "IL-Ho Yang", "Sung-Hyun Yoon", "Ha-Jin Yu"], "pdf": "/pdf/ebda0b9bf2a23a70e3248a2379987d8ce425649d.pdf", "paperhash": "heo|cosine_similaritybased_adversarial_process"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087757801, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rylU8oRctX", "original": "rkxiRzXcKX", "number": 
175, "cdate": 1538087757595, "ddate": null, "tcdate": 1538087757595, "tmdate": 1683306252169, "tddate": null, "forum": "rylU8oRctX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Learning with Little Data: Evaluation of Deep Learning Algorithms", "abstract": "Deep learning has become a widely used tool in many computational and classification problems. \nNevertheless obtaining and labeling data, which is needed for strong results, is often expensive or even not possible. \nIn this paper three different algorithmic approaches to deal with limited access to data are evaluated and compared to each other. \nWe show the drawbacks and benefits of each method. \nOne successful approach, especially in one- or few-shot learning tasks, is the use of external data during the classification task. \nAnother successful approach, which achieves state of the art results in semi-supervised learning (SSL) benchmarks, is consistency regularization.\nEspecially virtual adversarial training (VAT) has shown strong results and will be investigated in this paper. \nThe aim of consistency regularization is to force the network not to change the output, when the input or the network itself is perturbed.\nGenerative adversarial networks (GANs) have also shown strong empirical results. \nIn many approaches the GAN architecture is used in order to create additional data and therefor to increase the generalization capability of the classification network.\nFurthermore we consider the use of unlabeled data for further performance improvement. \nThe use of unlabeled data is investigated both for GANs and VAT. \n", "keywords": ["semi-supervised learning", "generative models", "few shot learning"], "authorids": ["andreas.look@ihs.uni-stuttgart.de", "stefan.riedelbauch@ihs.uni-stuttgart.de"], "authors": ["Andreas Look", "Stefan Riedelbauch"], "TL;DR": "Comparison of siamese neural networks, GANs, and VAT for few shot learning. 
", "pdf": "/pdf/5b619ce7e636fbe77e59a7b18a024b5cb513b3e7.pdf", "paperhash": "look|learning_with_little_data_evaluation_of_deep_learning_algorithms"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087757621, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1xHUiC5tm", "original": "BylFvucGKQ", "number": 169, "cdate": 1538087756542, "ddate": null, "tcdate": 1538087756542, "tmdate": 1683306251956, "tddate": null, "forum": "B1xHUiC5tm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Context-aware Forecasting for Multivariate Stationary Time-series", "abstract": "The domain of time-series forecasting has been extensively studied because it is of fundamental importance in many real-life applications. Weather prediction, traffic flow forecasting or sales are compelling examples of sequential phenomena. Predictive models generally make use of the relations between past and future values. 
However, in the case of stationary time-series, observed values also drastically depend on a number of exogenous features that can be used to improve forecasting quality. In this work, we propose a change of paradigm which consists in learning such features in embeddings vectors within recurrent neural networks. We apply our framework to forecast smart cards tap-in logs in the Parisian subway network. Results show that context-embedded models perform quantitatively better in one-step ahead and multi-step ahead forecasting.", "keywords": [], "authorids": ["guiguetvalentin@gmail.com", "nicolas.baskiotis@lip6.fr", "vincent.guigue@lip6.fr", "patrick.gallinari@lip6.fr"], "authors": ["Valentin Guiguet", "Nicolas Baskiotis", "Vincent Guigue", "Patrick Gallinari"], "TL;DR": "In order to forecast multivariate stationary time-series we learn embeddings containing contextual features within a RNN; we apply the framework on public transportation data", "pdf": "/pdf/67af12b4086d190130cecaebbe59b4339169fb3b.pdf", "paperhash": "guiguet|contextaware_forecasting_for_multivariate_stationary_timeseries"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087756567, "details": {"replyCount": 6, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": 
["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJzVUj0qtQ", "original": "B1eQURBDKX", "number": 166, "cdate": 1538087756004, "ddate": null, "tcdate": 1538087756004, "tmdate": 1750551537269, "tddate": null, "forum": "BJzVUj0qtQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Evading Defenses to Transferable Adversarial Examples by Mitigating Attention Shift", "abstract": "Deep neural networks are vulnerable to adversarial examples, which can mislead classifiers by adding imperceptible perturbations. An intriguing property of adversarial examples is their good transferability, making black-box attacks feasible in real-world applications. Due to the threat of adversarial attacks, many methods have been proposed to improve the robustness, and several state-of-the-art defenses are shown to be robust against transferable adversarial examples. In this paper, we identify the attention shift phenomenon, which may hinder the transferability of adversarial examples to the defense models. It indicates that the defenses rely on different discriminative regions to make predictions compared with normally trained models. Therefore, we propose an attention-invariant attack method to generate more transferable adversarial examples. Extensive experiments on the ImageNet dataset validate the effectiveness of the proposed method. Our best attack fools eight state-of-the-art defenses at an 82% success rate on average based only on the transferability, demonstrating the insecurity of the defense techniques. 
", "keywords": ["adversarial examples", "black-box attack", "transferability"], "authorids": ["dyp17@mails.tsinghua.edu.cn", "pty17@mails.tsinghua.edu.cn", "suhangss@mail.tsinghua.edu.cn", "dcszj@mail.tsinghua.edu.cn"], "authors": ["Yinpeng Dong", "Tianyu Pang", "Hang Su", "Jun Zhu"], "TL;DR": "We propose an attention-invariant attack method to generate more transferable adversarial examples for black-box attacks, which can fool state-of-the-art defenses with a high success rate.", "pdf": "/pdf/4699f6c8f823181a4befe3ee1bbd8ce411a8245a.pdf", "paperhash": "dong|evading_defenses_to_transferable_adversarial_examples_by_mitigating_attention_shift", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/evading-defenses-to-transferable-adversarial/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087756030, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HklmIsC9Y7", "original": "BJeEDI0Kt7", 
"number": 159, "cdate": 1538087754767, "ddate": null, "tcdate": 1538087754767, "tmdate": 1683306251787, "tddate": null, "forum": "HklmIsC9Y7", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "UNSUPERVISED CONVOLUTIONAL NEURAL NETWORKS FOR ACCURATE VIDEO FRAME INTERPOLATION WITH INTEGRATION OF MOTION COMPONENTS", "abstract": "Optical flow and video frame interpolation are considered as a chicken-egg problem such that one problem affects the other and vice versa. This paper presents a deep neural network that integrates the flow network into the frame interpolation problem, with end-to-end learning. The proposed approach exploits the relationship between the two problems for quality enhancement of interpolation frames. Unlike recent convolutional neural networks, the proposed approach learns motions from natural video frames without graphical ground truth flows for training. This makes the network learn from extensive data and improve the performance. The motion information from the flow network guides interpolator networks to be trained to synthesize the interpolated frame accurately from motion scenarios. In addition, diverse datasets to cover various challenging cases that previous interpolations usually fail in is used for comparison. In all experimental datasets, the proposed network achieves better performance than state-of-art CNN based interpolations. With Middebury benchmark, compared with the top-ranked algorithm, the proposed network reduces an average interpolation error by about 9.3%. 
The proposed interpolation is ranked the 1st in Standard Deviation (SD) interpolation error, the 2nd in Average Interpolation Error among over 150 algorithms listed in the Middlebury interpolation benchmark.", "keywords": ["Frame Interpolation", "Frame Rate Up Conversion", "Convolutional Neural Networks", "CNN", "Unsupervised learning"], "authorids": ["itmanhieu@snu.ac.kr", "kyujoonglee@sunmoon.ac.kr", "hjlee@capp.snu.ac.kr"], "authors": ["Thang Van Nguyen", "Kyu-Joong Lee", "Hyuk-Jae Lee"], "pdf": "/pdf/52f5e4ab36307d568022b7be50e01b5bcec9dc3b.pdf", "paperhash": "nguyen|unsupervised_convolutional_neural_networks_for_accurate_video_frame_interpolation_with_integration_of_motion_components"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087754796, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "Sy4G8sC9KX", "original": "rJxOK35FY7", "number": 157, "cdate": 1538087754423, "ddate": null, "tcdate": 1538087754423, "tmdate": 1683306251733, "tddate": null, "forum": 
"Sy4G8sC9KX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "N/A", "abstract": "N/A", "keywords": [], "authorids": ["vladymyrov@gmail.com"], "authors": ["N/A"], "pdf": "/pdf/6ea5ac9af056884a6ceba385bbe8748e32b39fe7.pdf", "paperhash": "na|na"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087754448, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SyEGUi05Km", "original": "SJlSCQOPF7", "number": 156, "cdate": 1538087754247, "ddate": null, "tcdate": 1538087754247, "tmdate": 1750551537416, "tddate": null, "forum": "SyEGUi05Km", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "CrystalGAN: Learning to Discover Crystallographic Structures with Generative Adversarial Networks", "abstract": "Our main motivation is to propose an efficient approach to generate novel multi-element stable chemical compounds that can be used in real world applications. 
This task can be formulated as a combinatorial problem, and it takes many hours of human experts to construct, and to evaluate new data. Unsupervised learning methods such as Generative Adversarial Networks (GANs) can be efficiently used to produce new data. Cross-domain Generative Adversarial Networks were reported to achieve exciting results in image processing applications. However, in the domain of materials science, there is a need to synthesize data with higher order complexity compared to observed samples, and the state-of-the-art cross-domain GANs can not be adapted directly. \n\nIn this contribution, we propose a novel GAN called CrystalGAN which generates new chemically stable crystallographic structures with increased domain complexity. We introduce an original architecture, we provide the corresponding loss functions, and we show that the CrystalGAN generates very reasonable data. We illustrate the efficiency of the proposed method on a real original problem of novel hydrides discovery that can be further used in development of hydrogen storage materials.", "keywords": ["Generative Adversarial Nets", "Cross-Domain Learning", "Materials Science", "Higher-order Complexity"], "authorids": ["asma.nouira.91@gmail.com", "nataliya.sokolovska@upmc.fr", "jccrivello@icmpe.cnrs.fr"], "authors": ["Asma Nouira", "Nataliya Sokolovska", "Jean-Claude Crivello"], "TL;DR": "\"Generating new chemical materials using novel cross-domain GANs.\"", "pdf": "/pdf/939eaaf772cfe5eb581dc9ecefdd5c95debd2666.pdf", "paperhash": "nouira|crystalgan_learning_to_discover_crystallographic_structures_with_generative_adversarial_networks", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/crystalgan-learning-to-discover/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087754273, "details": {"replyCount": 4, 
"invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJe-LiA5YX", "original": "BJxakHauK7", "number": 151, "cdate": 1538087753319, "ddate": null, "tcdate": 1538087753319, "tmdate": 1683306251444, "tddate": null, "forum": "rJe-LiA5YX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Exponentially Decaying Flows for Optimization in Deep Learning", "abstract": "The field of deep learning has been craving for an optimization method that shows outstanding property for both optimization and generalization. We propose a method for mathematical optimization based on flows along geodesics, that is, the shortest paths between two points, with respect to the Riemannian metric induced by a non-linear function. In our method, the flows refer to Exponentially Decaying Flows (EDF), as they can be designed to converge on the local solutions exponentially. 
In this paper, we conduct experiments to show its high performance on optimization benchmarks (i.e., convergence properties), as well as its potential for producing good machine learning benchmarks (i.e., generalization properties).", "keywords": ["optimization", "deep learning"], "authorids": ["takeori.mitsuharu.d5s@jp.nssol.nssmc.com", "nakamura.kenta.4n4@jp.nssol.nssmc.com"], "authors": ["Mitsuharu Takeori", "Kenta Nakamura"], "TL;DR": "Introduction of a new optimization method and its application to deep learning.", "pdf": "/pdf/749a496fdc031db1e1814571f105483892e23973.pdf", "paperhash": "takeori|exponentially_decaying_flows_for_optimization_in_deep_learning"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087753343, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "r1z1UjA5FX", "original": "HygQ4rB4FX", "number": 138, "cdate": 1538087750902, "ddate": null, "tcdate": 1538087750902, "tmdate": 1750551537520, "tddate": null, "forum": "r1z1UjA5FX", "replyto": null, 
"invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Adversarial Defense Via Data Dependent Activation Function and Total Variation Minimization", "abstract": "We improve the robustness of deep neural nets to adversarial attacks by using an interpolating function as the output activation. This data-dependent activation function remarkably improves both classification accuracy and stability to adversarial perturbations. Together with the total variation minimization of adversarial images and augmented training, under the strongest attack, we achieve up to 20.6%, 50.7%, and 68.7% accuracy improvement w.r.t. the fast gradient sign method, iterative fast gradient sign method, and Carlini-WagnerL2attacks, respectively. Our defense strategy is additive to many of the existing methods. We give an intuitive explanation of our defense strategy via analyzing the geometry of the feature space. For reproducibility, the code will be available on GitHub.", "keywords": ["Adversarial Attack", "Adversarial Defense", "Data Dependent Activation Function", "Total Variation Minimization"], "authorids": ["wangbaonj@gmail.com", "atlin@math.ucla.edu", "zqshi@mail.tsinghua.edu.cn", "zhu@math.duke.edu", "yph@g.ucla.edu", "bertozzi@math.ucla.edu", "sjo@math.ucla.edu"], "authors": ["Bao Wang", "Alex T. Lin", "Zuoqiang Shi", "Wei Zhu", "Penghang Yin", "Andrea L. Bertozzi", "Stanley J. 
Osher"], "TL;DR": "We proposal strategies for adversarial defense based on data dependent activation function, total variation minimization, and training data augmentation", "pdf": "/pdf/0af7d8595acd050aa1f5ed39386d3b23f2d3c516.pdf", "paperhash": "wang|adversarial_defense_via_data_dependent_activation_function_and_total_variation_minimization", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/adversarial-defense-via-data-dependent/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087750929, "details": {"replyCount": 13, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BylTHoR5Km", "original": "HJl0eAtxY7", "number": 126, "cdate": 1538087748748, "ddate": null, "tcdate": 1538087748748, "tmdate": 1683306251253, "tddate": null, "forum": "BylTHoR5Km", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Isolating effects of age with fair representation learning when 
assessing dementia", "abstract": "One of the most prevalent symptoms among the elderly population, dementia, can be detected by classifiers trained on linguistic features extracted from narrative transcripts. However, these linguistic features are impacted in a similar but different fashion by the normal aging process. Aging is therefore a confounding factor, whose effects have been hard for machine learning classifiers to isolate. \n\nIn this paper, we show that deep neural network (DNN) classifiers can infer ages from linguistic features, which is an entanglement that could lead to unfairness across age groups. We show this problem is caused by undesired activations of v-structures in causality diagrams, and it could be addressed with fair representation learning. We build neural network classifiers that learn low-dimensional representations reflecting the impacts of dementia yet discarding the effects of age. To evaluate these classifiers, we specify a model-agnostic score $\\Delta_{eo}^{(N)}$ measuring how classifier results are disentangled from age. Our best models outperform baseline neural network classifiers in disentanglement, while compromising accuracy by as little as 2.56\\% and 2.25\\% on DementiaBank and the Famous People dataset respectively. 
", "keywords": [], "authorids": ["zining.zhu@mail.utoronto.ca", "jekaterina@winterlightlabs.com", "frank@spoclab.com"], "authors": ["Zining Zhu", "Jekaterina Novikova", "Frank Rudzicz"], "TL;DR": "Show that age confounds cognitive impairment detection + solve with fair representation learning + propose metrics and models.", "pdf": "/pdf/2f8c0d1cf927595218d57eb4cff726c65d05aa31.pdf", "paperhash": "zhu|isolating_effects_of_age_with_fair_representation_learning_when_assessing_dementia"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087748773, "details": {"replyCount": 8, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rJgdHs05FQ", "original": "HJgUmdKtKQ", "number": 103, "cdate": 1538087744449, "ddate": null, "tcdate": 1538087744449, "tmdate": 1683306251198, "tddate": null, "forum": "rJgdHs05FQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Nonlinear Channels Aggregation Networks for Deep Action Recognition", "abstract": "We introduce the 
concept of channel aggregation in ConvNet architecture, a novel compact representation of CNN features useful for explicitly modeling the nonlinear channels encoding especially when the new unit is embedded inside of deep architectures for action recognition. The channel aggregation is based on multiple-channels features of ConvNet and aims to be at the spot finding the optical convergence path at fast speed. We name our proposed convolutional architecture \u201cnonlinear channels aggregation networks (NCAN)\u201d and its new layer \u201cnonlinear channels aggregation layer (NCAL)\u201d. We theoretically motivate channels aggregation functions and empirically study their effect on convergence speed and classification accuracy. Another contribution in this work is an efficient and effective implementation of the NCAL, speeding it up orders of magnitude. We evaluate its performance on standard benchmarks UCF101 and HMDB51, and experimental results demonstrate that this formulation not only obtains a fast convergence but stronger generalization capability without sacrificing performance.", "keywords": ["action recognition", "convolutional neural network", "network training"], "authorids": ["zgzhu_xidian@163.com", "hbji@xidian.edu.cn", "zwbsoul@163.com", "ouoyc@aliyun.com"], "authors": ["Zhigang Zhu", "Hongbing Ji", "Wenbo Zhang", "Cheng Ouyang"], "TL;DR": "An architecture enables CNN trained on the video sequences converging rapidly ", "pdf": "/pdf/806c29ad0d911811b613044c2254ac129edda299.pdf", "paperhash": "zhu|nonlinear_channels_aggregation_networks_for_deep_action_recognition"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087744476, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": 
{"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "B1l8SsR9Fm", "original": "H1gBRL4KFQ", "number": 88, "cdate": 1538087741725, "ddate": null, "tcdate": 1538087741725, "tmdate": 1683306251381, "tddate": null, "forum": "B1l8SsR9Fm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Learning and Data Selection in Big Datasets", "abstract": "Finding a dataset of minimal cardinality to characterize the optimal parameters of a model is of paramount importance in machine learning and distributed optimization over a network. This paper investigates the compressibility of large datasets. More specifically, we propose a framework that jointly learns the input-output mapping as well as the most representative samples of the dataset (sufficient dataset). Our analytical results show that the cardinality of the sufficient dataset increases sub-linearly with respect to the original dataset size. Numerical evaluations of real datasets reveal a large compressibility, up to 95%, without a noticeable drop in the learnability performance, measured by the generalization error.\n", "paperhash": "ghadikolaei|learning_and_data_selection_in_big_datasets", "authorids": ["hshokri@kth.se", "ghauch@kth.se", "carlofi@kth.se", "skoglund@kth.se"], "authors": ["Hossein S. 
Ghadikolaei", "Hadi Ghauch", "Carlo Fischione", "Mikael Skoglund"], "keywords": ["Data selection", "non-convex optimization", "learning theory", "active learning"], "pdf": "/pdf/950bfcaecf33e7b94f86ceaaed0ffa9947a63e11.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087741753, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJM4SjR5KQ", "original": "HkxEPgO0um", "number": 81, "cdate": 1538087740404, "ddate": null, "tcdate": 1538087740404, "tmdate": 1750551537790, "tddate": null, "forum": "HJM4SjR5KQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "SpaMHMM: Sparse Mixture of Hidden Markov Models for Graph Connected Entities", "abstract": "We propose a framework to model the distribution of sequential data coming from\na set of entities connected in a graph with a known topology. 
The method is\nbased on a mixture of shared hidden Markov models (HMMs), which are trained\nin order to exploit the knowledge of the graph structure and in such a way that the\nobtained mixtures tend to be sparse. Experiments in different application domains\ndemonstrate the effectiveness and versatility of the method.", "paperhash": "pernes|spamhmm_sparse_mixture_of_hidden_markov_models_for_graph_connected_entities", "TL;DR": "A method to model the generative distribution of sequences coming from graph connected entities.", "authorids": ["dpc@inesctec.pt", "jaime.cardoso@inesctec.pt"], "authors": ["Diogo Pernes", "Jaime S. Cardoso"], "keywords": ["multi-entity sequential data", "hidden markov models"], "pdf": "/pdf/07cc47ade35cb7a7e403954909325ba469a6e2e9.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/spamhmm-sparse-mixture-of-hidden-markov/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087740430, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, 
"cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "rkgWBi09Ym", "original": "S1lYp4VftX", "number": 64, "cdate": 1538087737292, "ddate": null, "tcdate": 1538087737292, "tmdate": 1683306251295, "tddate": null, "forum": "rkgWBi09Ym", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Multi-Modal Generative Adversarial Networks for Diverse Datasets", "abstract": "Generative Adversarial Networks (GANs) have been shown to produce realistically looking synthetic images with remarkable success, yet their performance seems less impressive when the training set is highly diverse. In order to provide a better fit to the target data distribution when the dataset includes many different classes, we propose a variant of the basic GAN model, a Multi-Modal Gaussian-Mixture GAN (GM-GAN), where the probability distribution over the latent space is a mixture of Gaussians. We also propose a supervised variant which is capable of conditional sample synthesis. In order to evaluate the model's performance, we propose a new scoring method which separately takes into account two (typically conflicting) measures - diversity vs. quality of the generated data. Through a series of experiments, using both synthetic and real-world datasets, we quantitatively show that GM-GANs outperform baselines, both when evaluated using the commonly used Inception Score, and when evaluated using our own alternative scoring method. In addition, we qualitatively demonstrate how the unsupervised variant of GM-GAN tends to map latent vectors sampled from different Gaussians in the latent space to samples of different classes in the data space. We show how this phenomenon can be exploited for the task of unsupervised clustering, and provide quantitative evaluation showing the superiority of our method for the unsupervised clustering of image datasets. 
Finally, we demonstrate a feature which further sets our model apart from other GAN models: the option to control the quality-diversity trade-off by altering, post-training, the probability distribution of the latent space. This allows one to sample higher quality and lower diversity samples, or vice versa, according to one's needs.", "paperhash": "benyosef|multimodal_generative_adversarial_networks_for_diverse_datasets", "TL;DR": "Multi modal Guassian distribution of latent space in GAN models improves performance and allows to trade-off quality vs. diversity", "authorids": ["matan.benyosef@mail.huji.ac.il", "daphna@cs.huji.ac.il"], "authors": ["Matan Ben-Yosef", "Daphna Weinshall"], "keywords": ["generative adversarial networks", "generative models", "clustering", "visual object recognition"], "pdf": "/pdf/9581241e7ba8411f54fe22635236fd8108ea2a10.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087737320, "details": {"replyCount": 3, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": 
"rylxrsR9Fm", "original": "BklVSY3GFm", "number": 56, "cdate": 1538087735827, "ddate": null, "tcdate": 1538087735827, "tmdate": 1683306250832, "tddate": null, "forum": "rylxrsR9Fm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Neuron Hierarchical Networks", "abstract": "In this paper, we propose a neural network framework called neuron hierarchical network (NHN), that evolves beyond the hierarchy in layers, and concentrates on the hierarchy of neurons. We observe mass redundancy in the weights of both handcrafted and randomly searched architectures. Inspired by the development of human brains, we prune low-sensitivity neurons in the model and add new neurons to the graph, and the relation between individual neurons are emphasized and the existence of layers weakened. We propose a process to discover the best base model by random architecture search, and discover the best locations and connections of the added neurons by evolutionary search. 
Experiment results show that the NHN achieves higher test accuracy on Cifar-10 than state-of-the-art handcrafted and randomly searched architectures, while requiring much fewer parameters and less searching time.", "paperhash": "yue|neuron_hierarchical_networks", "TL;DR": "By breaking the layer hierarchy, we propose a 3-step approach to the construction of neuron-hierarchy networks that outperform NAS, SMASH and hierarchical representation with fewer parameters and shorter searching time.", "authorids": ["johnhany@163.com", "wudean.cn@uestc.edu.cn", "wulei@uestc.edu.cn", "zonghengxs@163.com"], "authors": ["Han Yue", "De-An Wu", "Lei Wu", "Ji Xie"], "keywords": ["neural network", "architecture search", "evolution strategy"], "pdf": "/pdf/5d6d874ccbd892c528ca10fc9f258b9908b5620f.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087735852, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "BJlhEs09YQ", "original": "Syl_e6N_tQ", "number": 34, "cdate": 1538087731714, 
"ddate": null, "tcdate": 1538087731714, "tmdate": 1683306250752, "tddate": null, "forum": "BJlhEs09YQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "End-to-end Learning of a Convolutional Neural Network via Deep Tensor Decomposition", "abstract": "In this paper we study the problem of learning the weights of a deep convolutional neural network. We consider a network where convolutions are carried out over non-overlapping patches with a single kernel in each layer. We develop an algorithm for simultaneously learning all the kernels from the training data. Our approach dubbed Deep Tensor Decomposition (DeepTD) is based on a rank-1 tensor decomposition. We theoretically investigate DeepTD under a realizable model for the training data where the inputs are chosen i.i.d. from a Gaussian distribution and the labels are generated according to planted convolutional kernels. We show that DeepTD is data-efficient and provably works as soon as the sample size exceeds the total number of convolutional weights in the network. Our numerical experiments demonstrate the effectiveness of DeepTD and verify our theoretical findings.", "paperhash": "oymak|endtoend_learning_of_a_convolutional_neural_network_via_deep_tensor_decomposition", "keywords": ["convolutional neural network", "tensor decomposition", "sample complexity", "approximation"], "authorids": ["sametoymak@gmail.com", "soltanol@usc.edu"], "authors": ["Samet Oymak", "Mahdi Soltanolkotabi"], "TL;DR": "We consider a simplified deep convolutional neural network model. 
We show that all layers of this network can be approximately learned with a proper application of tensor decomposition.", "pdf": "/pdf/af6bda4b01a1ddda57321e68b2271d38a60373a3.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087731742, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "H1fs4oRqKm", "original": "B1xvr5UuYm", "number": 31, "cdate": 1538087731132, "ddate": null, "tcdate": 1538087731132, "tmdate": 1750551537899, "tddate": null, "forum": "H1fs4oRqKm", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "UNSUPERVISED MONOCULAR DEPTH ESTIMATION WITH CLEAR BOUNDARIES", "abstract": "Unsupervised monocular depth estimation has made great progress after deep\nlearning is involved. Training with binocular stereo images is considered as a\ngood option as the data can be easily obtained. However, the depth or disparity\nprediction results show poor performance for the object boundaries. 
The main\nreason is related to the handling of occlusion areas during the training. In this paper,\nwe propose a novel method to overcome this issue. Exploiting disparity maps\nproperty, we generate an occlusion mask to block the back-propagation of the occlusion\nareas during image warping. We also design new networks with flipped\nstereo images to induce the networks to learn occluded boundaries. It shows that\nour method achieves clearer boundaries and better evaluation results on KITTI\ndriving dataset and Virtual KITTI dataset.", "paperhash": "hu|unsupervised_monocular_depth_estimation_with_clear_boundaries", "keywords": ["monocular depth estimation", "unsupervised learning", "image warping"], "authorids": ["y4hu@eng.ucsd.edu", "heng.luo@horizon.ai", "yifeng.geng@horizon.ai"], "authors": ["Yihan Hu", "Heng Luo", "Yifeng Geng"], "TL;DR": "This paper propose a mask method which solves the previous blurred results of unsupervised monocular depth estimation caused by occlusion", "pdf": "/pdf/fd8a95731363be1f28040da1e23cdeea49808d03.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 12 code implementations](https://www.catalyzex.com/paper/unsupervised-monocular-depth-estimation-with/code)"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087731160, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": 
["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "ByzoVi0cFQ", "original": "B1elf1vdt7", "number": 30, "cdate": 1538087730950, "ddate": null, "tcdate": 1538087730950, "tmdate": 1683306250597, "tddate": null, "forum": "ByzoVi0cFQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Transfer Learning for Estimating Causal Effects Using Neural Networks", "abstract": "We develop new algorithms for estimating heterogeneous treatment effects, combining recent developments in transfer learning for neural networks with insights from the causal inference literature. By taking advantage of transfer learning, we are able to efficiently use different data sources that are related to the same underlying causal mechanisms. We compare our algorithms with those in the extant literature using extensive simulation studies based on large-scale voter persuasion experiments and the MNIST database. Our methods can perform an order of magnitude better than existing benchmarks while using a fraction of the data.", "paperhash": "k\u00fcnzel|transfer_learning_for_estimating_causal_effects_using_neural_networks", "keywords": ["machine learning", "causal inference", "causal neural networks", "deep learning", "CATE estimation", "transfer learning", "meta-learning", "causal transfer"], "authorids": ["srk@berkeley.edu", "bstadie@berkeley.edu", "nikitavemuri@berkeley.edu", "vio@berkeley.edu", "sekhon@berkeley.edu", "pabbeel@cs.berkeley.edu"], "authors": ["S\u00f6ren R. K\u00fcnzel", "Bradly C. Stadie", "Nikita Vemuri", "Varsha Ramakrishnan", "Jasjeet S. 
Sekhon", "Pieter Abbeel"], "TL;DR": "Transfer learning for estimating causal effects using neural networks.", "pdf": "/pdf/caf2d8c58f700eb73d556a6f3698e2e0cbcc02e2.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087730974, "details": {"replyCount": 4, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "Byg54oC5tQ", "original": "rJluamsuFQ", "number": 25, "cdate": 1538087730012, "ddate": null, "tcdate": 1538087730012, "tmdate": 1683306250568, "tddate": null, "forum": "Byg54oC5tQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Generative Model For Material Irradiation Experiments Based On Prior Knowledge And Attention Mechanism", "abstract": "Material irradiation experiment is dangerous and complex, which requires large number of high-level expertise in the manual processing of experimental images and data. 
In this paper, we propose a generative adversarial model based on prior knowledge and attention mechanism to achieve the generation of irradiated material images (data-to-image model), and a prediction model for corresponding industrial performance (image-to-data model). With the proposed models, researchers can skip the dangerous and complex irradiation experiments and obtain the irradiation images and industrial performance parameters directly by inputing some experimental parameters only. We also introduce a new dataset ISMD which contains 22000 irradiated images with 22,143 sets of corresponding parameters. Our model achieved high quality results by compared with several baseline models. The evaluation and detailed analysis are also performed.", "paperhash": "luo|generative_model_for_material_irradiation_experiments_based_on_prior_knowledge_and_attention_mechanism", "authorids": ["luomincong@foxmail.com", "1920148271@qq.com"], "authors": ["MinCong Luo", "Li Liu"], "keywords": ["Generative Model", "Images of Irradiation Experiments", "Prior Knowledge", "Attention Mechanism"], "pdf": "/pdf/3bf7b090abe2a748606fb70ec9e690c732f5148e.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087730039, "details": {"replyCount": 3, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], 
"readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SkMPNoCcKQ", "original": "rke797MtFX", "number": 10, "cdate": 1538087727162, "ddate": null, "tcdate": 1538087727162, "tmdate": 1683306250711, "tddate": null, "forum": "SkMPNoCcKQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Linearizing Visual Processes with Deep Generative Models", "abstract": "This work studies the problem of modeling non-linear visual processes by leveraging deep generative architectures for learning linear, Gaussian models of observed sequences. We propose a joint learning framework, combining a multivariate autoregressive model and deep convolutional generative networks. After justification of theoretical assumptions of inearization, we propose an architecture that allows Variational Autoencoders and Generative Adversarial Networks to simultaneously learn the non-linear observation as well as the linear state-transition model from a sequence of observed frames. 
Finally, we demonstrate our approach on conceptual toy examples and dynamic textures.", "paperhash": "sagel|linearizing_visual_processes_with_deep_generative_models", "keywords": ["Genearative Adversarial Network", "Variational Autoencoder", "Wasserstein GAN", "Autoregressive Model", "Dynamic Texture", "Video"], "authorids": ["a.sagel@tum.de", "shen@fortiss.org"], "authors": ["Alexander Sagel", "Hao Shen"], "TL;DR": "We model non-linear visual processes as autoregressive noise via generative deep learning.", "pdf": "/pdf/fc23fe20cdc02ca00a4bcbf5782fc1ccba1c9e73.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087727190, "details": {"replyCount": 7, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "SkgD4jAcYX", "original": "HJgQhhI_t7", "number": 7, "cdate": 1538087726570, "ddate": null, "tcdate": 1538087726570, "tmdate": 1683306250342, "tddate": null, "forum": "SkgD4jAcYX", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "NA", "abstract": 
"NA", "paperhash": "na|na", "keywords": ["NA"], "authorids": ["hongyang.gao@wsu.edu"], "authors": ["NA"], "TL;DR": "NA", "pdf": "/pdf/082af21a87467be889f234c69009cb0b40d6b9b4.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087726598, "details": {"replyCount": 13, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}} +{"id": "HJMINj05tQ", "original": "Syl_KXmKt7", "number": 5, "cdate": 1538087726187, "ddate": null, "tcdate": 1538087726187, "tmdate": 1683306250559, "tddate": null, "forum": "HJMINj05tQ", "replyto": null, "invitation": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "content": {"title": "Nesterov's method is the discretization of a differential equation with Hessian damping", "abstract": "Su-Boyd-Candes (2014) made a connection between Nesterov's method and an ordinary differential equation (ODE). We show if a Hessian damping term is added to the ODE from Su-Boyd-Candes (2014), then Nesterov's method arises as a straightforward discretization of the modified ODE. 
Analogously, in the strongly convex case, a Hessian damping term is added to Polyak's ODE, which is then discretized to yield Nesterov's method for strongly convex functions. Despite the Hessian term, both second order ODEs can be represented as first order systems.\n\nEstablished Liapunov analysis is used to recover the accelerated rates of convergence in both continuous and discrete time. Moreover, the Liapunov analysis can be extended to the case of stochastic gradients which allows the full gradient case to be considered as a special case of the stochastic case. The result is a unified approach to convex acceleration in both continuous and discrete time and in both the stochastic and full gradient cases. \n", "paperhash": "oberman|nesterovs_method_is_the_discretization_of_a_differential_equation_with_hessian_damping", "keywords": ["Nesterov's method", "convex optimization", "first-order methods", "stochastic gradient descent", "differential equations", "Liapunov's method"], "authorids": ["adam.oberman@mcgill.ca", "maxime.laborde@mcgill.ca"], "authors": ["Adam M. 
Oberman", "Maxime Laborde"], "TL;DR": "We derive Nesterov's method arises as a straightforward discretization of an ODE different from the one in Su-Boyd-Candes and prove acceleration the stochastic case", "pdf": "/pdf/84926e0df0bffb14be9ee086418341d7705d45e7.pdf"}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Conference"], "odate": 1538087726212, "details": {"replyCount": 9, "invitation": {"id": "ICLR.cc/2019/Conference/-/Withdrawn_Submission", "rdate": null, "ddate": null, "expdate": null, "duedate": null, "tmdate": 1539179226045, "tddate": null, "super": null, "final": null, "reply": {"forum": null, "replyto": null, "readers": {"description": "The users who will be allowed to read the reply content.", "values": ["everyone"]}, "signatures": {"description": "How your identity will be displayed with the above content.", "values": ["ICLR.cc/2019/Conference"]}, "writers": {"description": "Users that may modify this record.", "values": ["ICLR.cc/2019/Conference"]}, "content": {}}, "signatures": ["ICLR.cc/2019/Conference"], "readers": ["everyone"], "nonreaders": [], "invitees": ["ICLR.cc/2019/Conference"], "noninvitees": [], "writers": ["ICLR.cc/2019/Conference"], "multiReply": null, "taskCompletionCount": null, "transform": null, "cdate": 1539179226045, "tcdate": 1539179226045, "type": "note"}}}