| {"id": "S1xnKi5BOV", "original": "HJxUko9HuV", "number": 68, "cdate": 1553472388032, "ddate": null, "tcdate": 1553472388032, "tmdate": 1750552033653, "tddate": null, "forum": "S1xnKi5BOV", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "A Simple yet Effective Baseline for Robust Deep Learning with Noisy Labels", "authors": ["Anonymous"], "authorids": ["luoyc15@mails.tsinghua.edu.cn", "dcszj@mail.tsinghua.edu.cn", "tpfister@google.com"], "keywords": ["Learning with noisy labels", "generalization of deep neural networks", "robust deep learning"], "TL;DR": "The paper proposed a simple yet effective baseline for learning with noisy labels.", "abstract": "Recently deep neural networks have shown their capacity to memorize training data, even with noisy labels, which hurts generalization performance. To mitigate this issue, we propose a simple but effective method that is robust to noisy labels, even with severe noise. Our objective involves a variance regularization term that implicitly penalizes the Jacobian norm of the neural network on the whole training set (including the noisy-labeled data), which encourages generalization and prevents overfitting to the corrupted labels. Experiments on noisy benchmarks demonstrate that our approach achieves state-of-the-art performance with a high tolerance to severe noise.", "pdf": "/pdf/4ba6db75005b77ffbe09fc3209bbfa31d6c8e346.pdf", "paperhash": "anonymous|a_simple_yet_effective_baseline_for_robust_deep_learning_with_noisy_labels", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nanonymous2019a,\ntitle={A Simple yet Effective Baseline for Robust Deep Learning with Noisy Labels},\nauthor={Anonymous},\nyear={2019},\nurl={https://openreview.net/forum?id=S1xnKi5BOV}\n}", "community_implementations": "[ 2 code implementations](https://www.catalyzex.com/paper/a-simple-yet-effective-baseline-for-robust/code)"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553472388344, "details": {"replyCount": 3}} | |
| {"id": "Sklsts5H_E", "original": "Syx6OK5rdE", "number": 66, "cdate": 1553472386714, "ddate": null, "tcdate": 1553472386714, "tmdate": 1683306279263, "tddate": null, "forum": "Sklsts5H_E", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Deep Generative Inpainting with Comparative Sample Augmentation", "authors": ["Boli Fang", "Miao Jiang", "Jerry Shen", "Bjord Stenger"], "authorids": ["bfang@iu.edu", "miajiang@iu.edu", "hashen@iu.edu", "bjord.stenger@rakuten.com"], "keywords": ["Image Inpainting", "Various Datasets"], "TL;DR": "We introduced a strategy which enables inpainting models on datasets of various sizes", "abstract": "Recent advancements in deep learning techniques such as Convolutional Neural Networks(CNN) and Generative Adversarial Networks(GAN) have achieved breakthroughs in the problem of semantic image inpainting, the task of reconstructing missing pixels in given images. While much more effective than conventional approaches, deep learning models require large datasets and great computational resources for training, and inpainting quality varies considerably when training data vary in size and diversity. To address these problems, we present in this paper a inpainting strategy of \\textit{Comparative Sample Augmentation}, which enhances the quality of training set by filtering out irrelevant images and constructing additional images using information about the surrounding regions of the images to be inpainted. Experiments on multiple datasets demonstrate that our method extends the applicability of deep inpainting models to training sets with varying sizes, while maintaining inpainting quality as measured by qualitative and quantitative metrics for a large class of deep models, with little need for model-specific consideration.", "pdf": "/pdf/78c1ab4cbf3df45e5bb44b2f3307fe47842c57b1.pdf", "paperhash": "fang|deep_generative_inpainting_with_comparative_sample_augmentation", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nfang2019deep,\ntitle={Deep Generative Inpainting with Comparative Sample Augmentation},\nauthor={Boli Fang and Miao Jiang and Jerry Shen and Bjord Stenger},\nyear={2019},\nurl={https://openreview.net/forum?id=Sklsts5H_E}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553472387020, "details": {"replyCount": 3}} | |
| {"id": "HyxYFjqHd4", "original": "SJgqXFFBuN", "number": 62, "cdate": 1553472384664, "ddate": null, "tcdate": 1553472384664, "tmdate": 1683306278660, "tddate": null, "forum": "HyxYFjqHd4", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Interactions between Representation Learning and Supervision", "authors": ["Valliappa Chockalingam"], "authorids": ["valliapp@ualberta.ca"], "keywords": [], "abstract": "Representation learning is one of the fundamental problems of machine learning. On its own, this problem can be cast as an unsupervised dimensionality reduction problem. However, representation learning is often also used as an implicit step in supervised learning (SL) or reinforcement learning (RL) problems. In this paper, we study the possible \"interference\" supervision, commonly provided through a loss function in SL or a reward function in RL, might have on learning representations, through the lens of learning from limited data and continual learning. Particularly, in connectionist networks, we often face the problem of catastrophic interference whereby changes in the data distribution cause networks to fail to remember previously learned information and learning representations can be done without labeled data. A primary running hypothesis is that representations learned using unsupervised learning are more robust to changes in the data distribution as compared to the intermediate representations learned when using supervision because supervision interferes with otherwise \"unconstrained\" representation learning objectives. To empirically test hypotheses, we perform experiments using a standard dataset for continual learning, permuted MNIST. Additionally, through a heuristic quantifying the amount of change in the data distribution, we verify that the results are statistically significant.", "pdf": "/pdf/157bc0856ed9f597e7f4aef2e227b86ed9321284.pdf", "paperhash": "chockalingam|interactions_between_representation_learning_and_supervision", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nchockalingam2019interactions,\ntitle={Interactions between Representation Learning and Supervision},\nauthor={Valliappa Chockalingam},\nyear={2019},\nurl={https://openreview.net/forum?id=HyxYFjqHd4}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553472384956, "details": {"replyCount": 3}} | |
| {"id": "SJedYj5ruV", "original": "HkgX_PYBuE", "number": 60, "cdate": 1553472383630, "ddate": null, "tcdate": 1553472383630, "tmdate": 1683306278379, "tddate": null, "forum": "SJedYj5ruV", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Weak Supervision for Time Series: Wearable Sensor Classification with Limited Labeled Data", "authors": ["Saelig Khattar", "Hannah O\u2019Day", "Paroma Varma", "Jason Fries", "Jen Hicks", "Scott Delp", "Helen Bronte-Stewart", "Chris Re"], "authorids": ["saelig@stanford.edu", "odayj@stanford.edu", "paroma@stanford.edu", "jfries@stanford.edu", "jenhicks@stanford.edu", "delp@stanford.edu", "hbs@stanford.edu", "chrismre@cs.stanford.edu"], "keywords": ["wearable", "sensors", "weak supervision", "time series", "Parkinsons"], "TL;DR": "We demonstrate the feasibility of a weakly supervised time series classification approach for wearable sensor data. ", "abstract": "Using modern deep learning models to make predictions on time series data from wearable sensors generally requires large amounts of labeled data. However, labeling these large datasets can be both cumbersome and costly. In this paper, we apply weak supervision to time series data, and programmatically label a dataset from sensors worn by patients with Parkinson's. We then built a LSTM model that predicts when these patients exhibit clinically relevant freezing behavior (inability to make effective forward stepping). We show that (1) when our model is trained using patient-specific data (prior sensor sessions), we come within 9% AUROC of a model trained using hand-labeled data and (2) when we assume no prior observations of subjects, our weakly supervised model matched performance with hand-labeled data. These results demonstrate that weak supervision may help reduce the need to painstakingly hand label time series training data.", "pdf": "/pdf/9646b81ecb58265b8f35d3c942d67daa0e80e3f8.pdf", "paperhash": "khattar|weak_supervision_for_time_series_wearable_sensor_classification_with_limited_labeled_data", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nkhattar2019weak,\ntitle={Weak Supervision for Time Series: Wearable Sensor Classification with Limited Labeled Data},\nauthor={Saelig Khattar and Hannah O{\\textquoteright}Day and Paroma Varma and Jason Fries and Jen Hicks and Scott Delp and Helen Bronte-Stewart and Chris Re},\nyear={2019},\nurl={https://openreview.net/forum?id=SJedYj5ruV}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553472383933, "details": {"replyCount": 3}} | |
| {"id": "HkxHFj5BdV", "original": "rygVM5_BdN", "number": 57, "cdate": 1553472381306, "ddate": null, "tcdate": 1553472381306, "tmdate": 1683306278095, "tddate": null, "forum": "HkxHFj5BdV", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Parallel Recurrent Data Augmentation for GAN training with Limited and Diverse Data", "authors": ["Boli Fang", "Miao Jiang"], "authorids": ["bfang@iu.edu", "miajiang@iu.edu"], "keywords": ["GAN training", "Data Augmentation"], "TL;DR": "We introduced a novel, simple, and efficient data augmentation method that boosts the performances of existing GANs when training data is limited and diverse. ", "abstract": "The need for large amounts of training image data with clearly defined features is a major obstacle to applying generative adversarial networks(GAN) on image generation where training data is limited but diverse, since insufficient latent feature representation in the already scarce data often leads to instability and mode collapse during GAN training. To overcome the hurdle of limited data when applying GAN to limited datasets, we propose in this paper the strategy of \\textit{parallel recurrent data augmentation}, where the GAN model progressively enriches its training set with sample images constructed from GANs trained in parallel at consecutive training epochs. Experiments on a variety of small yet diverse datasets demonstrate that our method, with little model-specific considerations, produces images of better quality as compared to the images generated without such strategy. The source code and generated images of this paper will be made public after review. ", "pdf": "/pdf/f4d6b1fb9fdc028d5bced9b1fcffbfed5fbfd050.pdf", "paperhash": "fang|parallel_recurrent_data_augmentation_for_gan_training_with_limited_and_diverse_data", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nfang2019parallel,\ntitle={Parallel Recurrent Data Augmentation for {GAN} training with Limited and Diverse Data},\nauthor={Boli Fang and Miao Jiang},\nyear={2019},\nurl={https://openreview.net/forum?id=HkxHFj5BdV}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553472382279, "details": {"replyCount": 3}} | |
| {"id": "H1gxgiA4uN", "original": "rJl0rhBN_N", "number": 49, "cdate": 1553423080240, "ddate": null, "tcdate": 1553423080240, "tmdate": 1683306277311, "tddate": null, "forum": "H1gxgiA4uN", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Multi-Class Few Shot Learning Task and Controllable Environment", "authors": ["Dmitriy Serdyuk", "Negar Rostamzadeh", "Pedro Oliveira Pinheiro", "Boris Oreshkin", "Yoshua Bengio"], "authorids": ["serdyuk.dmitriy@gmail.com", "negar@elementai.com", "pedro@elementai.com", "boris@elementai.com", "yoshua.bengion@mila.quebec"], "keywords": ["few-shot", "few shot", "meta-learning", "metalearning"], "TL;DR": "We introduce a diagnostic task which is a variation of few-shot learning and introduce a dataset for it.", "abstract": "Deep learning approaches usually require a large amount of labeled data to generalize. However, humans can learn a new concept only by a few samples. One of the high cogntition human capablities is to learn several concepts at the same time. In this paper, we address the task of classifying multiple objects by seeing only a few samples from each category. To the best of authors' knowledge, there is no dataset specially designed for few-shot multiclass classification. We design a task of mutli-object few class classification and an environment for easy creating controllable datasets for this task. We demonstrate that the proposed dataset is sound using a method which is an extension of prototypical networks.", "pdf": "/pdf/8e082a767644d249af9200d751debe67712c6f06.pdf", "paperhash": "serdyuk|multiclass_few_shot_learning_task_and_controllable_environment", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nserdyuk2019multiclass,\ntitle={Multi-Class Few Shot Learning Task and Controllable Environment},\nauthor={Dmitriy Serdyuk and Negar Rostamzadeh and Pedro Oliveira Pinheiro and Boris Oreshkin and Yoshua Bengio},\nyear={2019},\nurl={https://openreview.net/forum?id=H1gxgiA4uN}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553423080534, "details": {"replyCount": 3}} | |
| {"id": "HyeggoCN_4", "original": "r1eHMT4NuV", "number": 48, "cdate": 1553423079725, "ddate": null, "tcdate": 1553423079725, "tmdate": 1683306277200, "tddate": null, "forum": "HyeggoCN_4", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Learning To Avoid Negative Transfer in Few Shot Transfer Learning", "authors": ["James O' Neill"], "authorids": ["james.o-neill@liverpool.ac.uk"], "keywords": ["few shot learning", "negative transfer", "cubic spline", "ensemble learning"], "TL;DR": "A dynamic bagging methods approach to avoiding negatve transfer in neural network few-shot transfer learning", "abstract": "Many tasks in natural language understanding require learning relationships between two sequences for various tasks such as natural language inference, paraphrasing and entailment. These aforementioned tasks are similar in nature, yet they are often modeled individually. Knowledge transfer can be effective for closely related tasks, which is usually carried out using parameter transfer in neural networks. However, transferring all parameters, some of which irrelevant for a target task, can lead to sub-optimal results and can have a negative effect on performance, referred to as \\textit{negative} transfer. \n\nHence, this paper focuses on the transferability of both instances and parameters across natural language understanding tasks by proposing an ensemble-based transfer learning method in the context of few-shot learning.\n\nOur main contribution is a method for mitigating negative transfer across tasks when using neural networks, which involves dynamically bagging small recurrent neural networks trained on different subsets of the source task/s. We present a straightforward yet novel approach for incorporating these networks to a target task for few-shot learning by using a decaying parameter chosen according to the slope changes of a smoothed spline error curve at sub-intervals during training.\n\nOur proposed method show improvements over hard and soft parameter sharing transfer methods in the few-shot learning case and shows competitive performance against models that are trained given full supervision on the target task, from only few examples.", "pdf": "/pdf/4dfe81e059c263505ef5714bce087f7575f60456.pdf", "paperhash": "neill|learning_to_avoid_negative_transfer_in_few_shot_transfer_learning", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nneill2019learning,\ntitle={Learning To Avoid Negative Transfer in Few Shot Transfer Learning},\nauthor={James O' Neill},\nyear={2019},\nurl={https://openreview.net/forum?id=HyeggoCN_4}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553423080021, "details": {"replyCount": 3}} | |
| {"id": "H1xylj04_V", "original": "Bygqx8mNOE", "number": 47, "cdate": 1553423079208, "ddate": null, "tcdate": 1553423079208, "tmdate": 1683306276977, "tddate": null, "forum": "H1xylj04_V", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Siamese Capsule Networks ", "authors": ["James O' Neill"], "authorids": ["james.o-neill@liverpool.ac.uk"], "keywords": ["capsule networks", "face verification", "siamse networks", "few-shot learning", "contrastive loss"], "TL;DR": "A pairwise learned capsule network that performs well on face verification tasks given limited labeled data ", "abstract": "Capsule Networks have shown encouraging results on \\textit{defacto} benchmark computer vision datasets such as MNIST, CIFAR and smallNORB. Although, they are yet to be tested on tasks where (1) the entities detected inherently have more complex internal representations and (2) there are very few instances per class to learn from and (3) where point-wise classification is not suitable. Hence, this paper carries out experiments on face verification in both controlled and uncontrolled settings that together address these points. In doing so we introduce \\textit{Siamese Capsule Networks}, a new variant that can be used for pairwise learning tasks. We find that the model improves over baselines in the few-shot learning setting, suggesting that capsule networks are efficient at learning discriminative representations when given few samples. \nWe find that \\textit{Siamese Capsule Networks} perform well against strong baselines on both pairwise learning datasets when trained using a contrastive loss with $\\ell_2$-normalized capsule encoded pose features, yielding best results in the few-shot learning setting where image pairs in the test set contain unseen subjects.", "pdf": "/pdf/173a34150e50b4689490e37202494343a2b261ff.pdf", "paperhash": "neill|siamese_capsule_networks", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nneill2019siamese,\ntitle={Siamese Capsule Networks },\nauthor={James O' Neill},\nyear={2019},\nurl={https://openreview.net/forum?id=H1xylj04_V}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553423079506, "details": {"replyCount": 2}} | |
| {"id": "rkxJgoRN_V", "original": "Bke0doGVOV", "number": 46, "cdate": 1553423078701, "ddate": null, "tcdate": 1553423078701, "tmdate": 1683306276811, "tddate": null, "forum": "rkxJgoRN_V", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Automatic Labeling of Data for Transfer Learning", "authors": ["Parijat Dube", "Bishwaranjan Bhattacharjee", "Siyu Huo", "Patrick Watson", "John Kender", "Brian Belgodere"], "authorids": ["pdube@us.ibm.com", "bhatta@us.ibm.com", "siyu.huo@us.ibm.com", "pwatson@us.ibm.com", "jrk@cs.columbia.edu", "bmbelgod@us.ibm.com"], "keywords": ["transfer learning", "fine-tuning", "divergence", "pseudo labeling", "automated labeling", "experiments"], "TL;DR": "A technique for automatically labeling large unlabeled datasets so that they can train source models for transfer learning and its experimental evaluation. ", "abstract": "Transfer learning uses trained weights from a source model as the initial weightsfor the training of a target dataset. A well chosen source with a large numberof labeled data leads to significant improvement in accuracy. We demonstrate atechnique that automatically labels large unlabeled datasets so that they can trainsource models for transfer learning. We experimentally evaluate this method, usinga baseline dataset of human-annotated ImageNet1K labels, against five variationsof this technique. We show that the performance of these automatically trainedmodels come within 17% of baseline on average.", "pdf": "/pdf/690eef7631dd3d690867765c99a4905f551b1f1b.pdf", "paperhash": "dube|automatic_labeling_of_data_for_transfer_learning", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\ndube2019automatic,\ntitle={Automatic Labeling of Data for Transfer Learning},\nauthor={Parijat Dube and Bishwaranjan Bhattacharjee and Siyu Huo and Patrick Watson and John Kender and Brian Belgodere},\nyear={2019},\nurl={https://openreview.net/forum?id=rkxJgoRN_V}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553423078990, "details": {"replyCount": 3}} | |
| {"id": "S1ghJiRVd4", "original": "rJlbivV7uN", "number": 41, "cdate": 1553423076142, "ddate": null, "tcdate": 1553423076142, "tmdate": 1683306276337, "tddate": null, "forum": "S1ghJiRVd4", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Biomedical Named Entity Recognition via Reference-Set Augmented Bootstrapping", "authors": ["Joel Mathew", "Shobeir Fakhraei", "Jose Luis Ambite"], "authorids": ["joel@isi.edu", "shobeir@isi.edu", "ambite@isi.edu"], "keywords": ["Name Entity Recognition", "Bootstrapping", "Neural Networks", "Reference Set", "Biomedicine"], "TL;DR": "Augmented bootstrapping approach combining information from a reference set with iterative refinements of soft labels to improve Name Entity Recognition from biomedical literature.", "abstract": "We present a weakly-supervised data augmentation approach to improve Named Entity Recognition (NER) in a challenging domain: extracting biomedical entities (e.g., proteins) from the scientific literature. First, we train a neural NER (NNER) model over a small seed of fully-labeled examples. Second, we use a reference set of entity names (e.g., proteins in UniProt) to identify entity mentions with high precision, but low recall, on an unlabeled corpus. Third, we use the NNER model to assign weak labels to the corpus. Finally, we retrain our NNER model iteratively over the augmented training set, including the seed, the reference-set examples, and the weakly-labeled examples, which results in refined labels. We show empirically that this augmented bootstrapping process significantly improves NER performance, and discuss the factors impacting the efficacy of the approach.", "pdf": "/pdf/cce31522f0b741c9d68977190045e334873606e7.pdf", "paperhash": "mathew|biomedical_named_entity_recognition_via_referenceset_augmented_bootstrapping", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nmathew2019biomedical,\ntitle={Biomedical Named Entity Recognition via Reference-Set Augmented Bootstrapping},\nauthor={Joel Mathew and Shobeir Fakhraei and Jose Luis Ambite},\nyear={2019},\nurl={https://openreview.net/forum?id=S1ghJiRVd4}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553423076442, "details": {"replyCount": 2}} | |
| {"id": "SJg2iEmldV", "original": "rJeIXevCPE", "number": 33, "cdate": 1553114275545, "ddate": null, "tcdate": 1553114275545, "tmdate": 1683306275766, "tddate": null, "forum": "SJg2iEmldV", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Augmented Memory Networks for Streaming-Based Active One-Shot Learning", "authors": ["Anonymous"], "authorids": ["ahk9339@gmail.com", "massimiliano.ruocco@ntnu.no", "eliezer.souza.silva@ntnu.no", "erlend.aune@ntnu.no"], "keywords": ["Active Learning", "Reinforcement Learning", "Few-Shot Learning"], "abstract": "One of the major challenges in training deep architectures for predictive tasks is the scarcity and cost of labeled training data. Active Learning (AL) is one way of addressing this challenge. In stream-based AL, observations are continuously made available to the learner that have to decide whether to request a label or to make a prediction. The goal is to reduce the request rate while at the same time maximize prediction performance. In previous research, reinforcement learning has been used for learning the AL request/prediction strategy. In our work, we propose to equip a reinforcement learning process with memory augmented neural networks, to enhance the one-shot capabilities. Moreover, we introduce Class Margin Sampling (CMS) as an extension of the standard margin sampling to the reinforcement learning setting. This strategy aims to reduce training time and improve sample efficiency in the training process. We evaluate the proposed method on a classification task using empirical accuracy of label predictions and percentage of label requests. The results indicates that the proposed method, by making use of the memory augmented networks and CMS in the training process, outperforms existing baselines.", "pdf": "/pdf/b83d3d86b87bb6e647fe3634787fbc7f26a37fbb.pdf", "paperhash": "anonymous|augmented_memory_networks_for_streamingbased_active_oneshot_learning", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nanonymous2019augmented,\ntitle={Augmented Memory Networks for Streaming-Based Active One-Shot Learning},\nauthor={Anonymous},\nyear={2019},\nurl={https://openreview.net/forum?id=SJg2iEmldV}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553114275821, "details": {"replyCount": 4}} | |
| {"id": "BJxt7NmlON", "original": "SJl9yv8KwV", "number": 26, "cdate": 1553114144826, "ddate": null, "tcdate": 1553114144826, "tmdate": 1683306274869, "tddate": null, "forum": "BJxt7NmlON", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Disentangled Representation Learning with Information Maximizing Autoencoder", "authors": ["Kazi Nazmul Haque", "Siddique Latif", "Rajib Rana"], "authorids": ["shezan.huq@gmail.com", "siddique.latif@usq.edu.au", "rajib.rana@usq.edu.au"], "keywords": ["Disentangled Representation Learning", "Data Augmentation", "Generative Adversarial Nets", "Unsupervised Learning"], "TL;DR": "Learn disentangle representation in an unsupervised manner.", "abstract": "Learning disentangled representation from any unlabelled data is a non-trivial problem. In this paper we propose Information Maximising Autoencoder (InfoAE) where the encoder learns powerful disentangled representation through maximizing the mutual information between the representation and given information in an unsupervised fashion. We have evaluated our model on MNIST dataset and achieved approximately 98.9 % test accuracy while using complete unsupervised training.", "pdf": "/pdf/6dace85bba82fd8f9c6a41e6caa746f315308dad.pdf", "paperhash": "haque|disentangled_representation_learning_with_information_maximizing_autoencoder", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nhaque2019disentangled,\ntitle={Disentangled Representation Learning with Information Maximizing Autoencoder},\nauthor={Kazi Nazmul Haque and Siddique Latif and Rajib Rana},\nyear={2019},\nurl={https://openreview.net/forum?id=BJxt7NmlON}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553114145090, "details": {"replyCount": 3}} | |
| {"id": "B1evmEQg_V", "original": "SklUGZpuDE", "number": 21, "cdate": 1553114142521, "ddate": null, "tcdate": 1553114142521, "tmdate": 1683306274603, "tddate": null, "forum": "B1evmEQg_V", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Learning Twitter User Sentiments on Climate Change with Limited Labeled Data", "authors": ["Allison Koenecke", "Jordi Feliu-Fab\u00e0"], "authorids": ["koenecke@stanford.edu", "jfeliu@stanford.edu"], "keywords": ["Climate Change", "Twitter Data", "Sentiment Analysis", "Automated Labelling", "Cohort Analysis"], "TL;DR": "We train RNNs on famous Twitter users to determine whether the general Twitter population is more likely to believe in climate change after a natural disaster.", "abstract": "While it is well-documented that climate change accepters and deniers have become increasingly polarized in the United States over time, there has been no large-scale examination of whether these individuals are prone to changing their opinions as a result of natural external occurrences. On the sub-population of Twitter users, we examine whether climate change sentiment changes in response to five separate natural disasters occurring in the U.S. in 2018. We begin by showing that tweets can be classified with over 75% accuracy as either accepting or denying climate change when using our methodology to compensate for limited labelled data; results are robust across several machine learning models and yield geographic-level results in line with prior research. We then apply RNNs to conduct a cohort-level analysis showing that the 2018 hurricanes yielded a statistically significant increase in average tweet sentiment affirming climate change. However, this effect does not hold for the 2018 blizzard and wildfires studied, implying that Twitter users' opinions on climate change are fairly ingrained on this subset of natural disasters.", "pdf": "/pdf/738edfe96e0c51a706aa863559c2e186c7f727e4.pdf", "paperhash": "koenecke|learning_twitter_user_sentiments_on_climate_change_with_limited_labeled_data", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nkoenecke2019learning,\ntitle={Learning Twitter User Sentiments on Climate Change with Limited Labeled Data},\nauthor={Allison Koenecke and Jordi Feliu-Fab{\\`a}},\nyear={2019},\nurl={https://openreview.net/forum?id=B1evmEQg_V}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553114142818, "details": {"replyCount": 3}} | |
| {"id": "S1xU74med4", "original": "B1gop9j_vE", "number": 20, "cdate": 1553114142061, "ddate": null, "tcdate": 1553114142061, "tmdate": 1683306274464, "tddate": null, "forum": "S1xU74med4", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Skip-connection and batch-normalization improve data separation ability", "authors": ["Yasutaka Furusho", "Kazushi Ikeda"], "authorids": ["furusho.yasutaka.fm1@is.naist.jp", "kazushi@is.naist.jp"], "keywords": ["Deep learning", "ResNet", "Skip-connection", "Batch-normalization"], "TL;DR": "The Skip-connection in ResNet and the batch-normalization improve the data separation ability and help to train a deep neural network.", "abstract": "The ResNet and the batch-normalization (BN) achieved high performance even when only a few labeled data are available. However, the reasons for its high performance are unclear. To clear the reasons, we analyzed the effect of the skip-connection in ResNet and the BN on the data separation ability, which is an important ability for the classification problem. Our results show that, in the multilayer perceptron with randomly initialized weights, the angle between two input vectors converges to zero in an exponential order of its depth, that the skip-connection makes this exponential decrease into a sub-exponential decrease, and that the BN relaxes this sub-exponential decrease into a reciprocal decrease. Moreover, our analysis shows that the preservation of the angle at initialization encourages trained neural networks to separate points from different classes. These imply that the skip-connection and the BN improve the data separation ability and achieve high performance even when only a few labeled data are available.", "pdf": "/pdf/69be1b3f4cf276ef8727f3decbe4492538b44506.pdf", "paperhash": "furusho|skipconnection_and_batchnormalization_improve_data_separation_ability", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nfurusho2019skipconnection,\ntitle={Skip-connection and batch-normalization improve data separation ability},\nauthor={Yasutaka Furusho and Kazushi Ikeda},\nyear={2019},\nurl={https://openreview.net/forum?id=S1xU74med4}\n}"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1553114142343, "details": {"replyCount": 3}} | |
| {"id": "HklJQ1JEDE", "original": "H1lAGyJEDN", "number": 8, "cdate": 1552310038712, "ddate": null, "tcdate": 1552310038712, "tmdate": 1750552036791, "tddate": null, "forum": "HklJQ1JEDE", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Regularity Normalization: Constraining Implicit Space with Minimum Description Length", "authors": ["Baihan Lin"], "authorids": ["doerlbh@gmail.com"], "keywords": ["MDL", "Universal code", "LLD", "Normalization", "Biological plausibility", "Unsupervised attention", "Imbalanced data"], "TL;DR": "Considering neural network optimization process as a model selection problem, we introduce a biological plausible normalization method that extracts statistical regularity under MDL principle to tackle imbalanced and limited data issue.", "abstract": "Inspired by the adaptation phenomenon of biological neuronal firing, we propose regularity normalization: a reparameterization of the activation in the neural network that take into account the statistical regularity in the implicit space. By considering the neural network optimization process as a model selection problem, the implicit space is constrained by the normalizing factor, the minimum description length of the optimal universal code. We introduce an incremental version of computing this universal code as normalized maximum likelihood and demonstrated its flexibility to include data prior such as top-down attention and other oracle information and its compatibility to be incorporated into batch normalization and layer normalization. The preliminary results showed that the proposed method outperforms existing normalization methods in tackling the limited and imbalanced data from a non-stationary distribution benchmarked on computer vision task. As an unsupervised attention mechanism given input data, this biologically plausible normalization has the potential to deal with other complicated real-world scenarios as well as reinforcement learning setting where the rewards are sparse and non-uniform. Further research is proposed to discover these scenarios and explore the behaviors among different variants.", "pdf": "/pdf/8ccb0550dd14d532fa099e80104224635ea2e3cc.pdf", "paperhash": "lin|regularity_normalization_constraining_implicit_space_with_minimum_description_length", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nlin2019regularity,\ntitle={Regularity Normalization: Constraining Implicit Space with Minimum Description Length},\nauthor={Baihan Lin},\nyear={2019},\nurl={https://openreview.net/forum?id=HklJQ1JEDE}\n}", "community_implementations": "[ 3 code implementations](https://www.catalyzex.com/paper/regularity-normalization-constraining/code)"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1552310038801, "details": {"replyCount": 3}} | |
| {"id": "HJxbbY_7PV", "original": "B1e-WFOQvE", "number": 6, "cdate": 1552283897425, "ddate": null, "tcdate": 1552283897425, "tmdate": 1750552037153, "tddate": null, "forum": "HJxbbY_7PV", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "ONLY SPARSITY BASED LOSS FUNCTION FOR LEARNING REPRESENTATIONS", "authors": ["Vivek Bakaraju", "Kishore Reddy Konda"], "authorids": ["vivek.bakaraju@insofe.edu.in", "konda.kishorereddy@gmail.com"], "keywords": ["Sparsity", "Unsupervised Learning", "Single Layer Models"], "abstract": "We study the emergence of sparse representations in neural networks. We show that in unsupervised\nmodels with regularization, the emergence of sparsity is the result of the input data samples being\ndistributed along highly non-linear or discontinuous manifold. We also derive a similar argument\nfor discriminatively trained networks and present experiments to support this hypothesis. Based\non our study of sparsity, we introduce a new loss function which can be used as regularization\nterm for models like autoencoders and MLPs. Further, the same loss function can also be used\nas a cost function for an unsupervised single-layered neural network model for learning efficient\nrepresentations.", "pdf": "/pdf/a3f6e59530e1de38ca75899a2411107bab4ada62.pdf", "paperhash": "bakaraju|only_sparsity_based_loss_function_for_learning_representations", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nbakaraju2019only,\ntitle={{ONLY} {SPARSITY} {BASED} {LOSS} {FUNCTION} {FOR} {LEARNING} {REPRESENTATIONS}},\nauthor={Vivek Bakaraju and Kishore Reddy Konda},\nyear={2019},\nurl={https://openreview.net/forum?id=HJxbbY_7PV}\n}", "community_implementations": "[ 1 code implementation](https://www.catalyzex.com/paper/only-sparsity-based-loss-function-for/code)"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1552283897494, "details": {"replyCount": 3}} | |
| {"id": "S1feL-4gr4", "original": "rklx8ZNxrE", "number": 1, "cdate": 1549971784409, "ddate": null, "tcdate": 1549971784409, "tmdate": 1750552037888, "tddate": null, "forum": "S1feL-4gr4", "replyto": null, "invitation": "ICLR.cc/2019/Workshop/LLD/-/Blind_Submission", "content": {"title": "Prototypical Metric Transfer Learning for Continuous Speech Keyword Spotting With Limited Training Data", "authors": ["Harshita Seth", "Pulkit Kumar", "Muktabh Mayank Srivastava"], "authorids": ["harshita@paralleldots.com", "pulkit@paralleldots.com", "muktabh@paralleldots.com"], "keywords": ["Audio keyword detection", "prototypical Metric Loss", "Few-shot", "Transfer Learning"], "abstract": "Continuous Speech Keyword Spotting (CSKS) is the problem of spotting keywords in recorded conversations, when a small number of instances of keywords are available in training data. Unlike the more common Keyword Spotting, where an algorithm needs to detect lone keywords or short phrases like \"Alexa\u201d, \u201cCortana\", \u201cHi Alexa!\u201d, \u201c`Whatsup Octavia?\u201d etc. in speech, CSKS needs to filter out embedded words from a continuous flow of speech, ie. spot \u201cAnna\u201d and \u201cgithub\u201d in \u201cI know a developer named Anna who can look into this github issue.\u201d Apart from the issue of limited training data availability, CSKS is an extremely imbalanced classification problem. We address the limitations of simple keyword spotting baselines for both aforementioned challenges by using a novel combination of loss functions (Prototypical networks\u2019 loss and metric loss) and transfer learning. Our method improves F1 score by over 10%. ", "pdf": "/pdf/812d2b2c106ff7f4bb580dbeb03c2143d5dca114.pdf", "paperhash": "seth|prototypical_metric_transfer_learning_for_continuous_speech_keyword_spotting_with_limited_training_data", "venue": "Submitted to LLD 2019", "venueid": "ICLR.cc/2019/Workshop/LLD", "_bibtex": "@misc{\nseth2019prototypical,\ntitle={Prototypical Metric Transfer Learning for Continuous Speech Keyword Spotting With Limited Training Data},\nauthor={Harshita Seth and Pulkit Kumar and Muktabh Mayank Srivastava},\nyear={2019},\nurl={https://openreview.net/forum?id=S1feL-4gr4}\n}", "community_implementations": "[ 1 code implementation](https://www.catalyzex.com/paper/prototypical-metric-transfer-learning-for/code)"}, "signatures": ["ICLR.cc/2019/Workshop/LLD"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2019/Workshop/LLD"], "odate": 1549971784457, "details": {"replyCount": 3}} | |