{ "id": "1605.09593", "version": "v1", "published": "2016-05-31T12:11:51.000Z", "updated": "2016-05-31T12:11:51.000Z", "title": "Controlling Exploration Improves Training for Deep Neural Networks", "authors": [ "Yasutoshi Ida", "Yasuhiro Fujiwara", "Sotetsu Iwamura" ], "categories": [ "cs.LG", "cs.AI", "stat.ML" ], "abstract": "Stochastic optimization methods are widely used for training of deep neural networks. However, it is still a challenging research problem to achieve effective training by using stochastic optimization methods. This is due to the difficulties in finding good parameters on a loss function that has many saddle points. In this paper, we propose a stochastic optimization method called STDProp for effective training of deep neural networks. Its key idea is to effectively explore parameters on a complex surface of a loss function. We additionally develop a momentum version of STDProp. While our approaches are easy to implement with high memory efficiency, they are more effective than other practical stochastic optimization methods for deep neural networks.", "revisions": [ { "version": "v1", "updated": "2016-05-31T12:11:51.000Z" } ], "analyses": { "keywords": [ "deep neural networks", "controlling exploration", "loss function", "high memory efficiency", "practical stochastic optimization methods" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }