This paper presents an investigation of the approximation property of neural
networks with unbounded activation functions, such as the rectified linear unit
(ReLU), which is the new de-facto standard of deep learning. The ReLU network
can be analyzed by the ridgelet transform with respect to Lizorkin
distributions. By showing three reconstruction formulas by using the Fourier
slice theorem, the Radon transform, and Parseval's relation, it is shown that a
neural network with unbounded activation functions still satisfies the
universal approximation property. As an additional consequence, the ridgelet
transform, or the backprojection filter in the Radon domain, is what the
network learns after backpropagation. Subject to a constructive admissibility
condition, the trained network can be obtained by simply discretizing the
ridgelet transform, without backpropagation. Numerical examples not only
support the consistency of the admissibility condition but also imply that some
non-admissible cases result in low-pass filtering.
Description
[1505.03654] Neural Network with Unbounded Activation Functions is Universal Approximator
%0 Journal Article
%1 sonoda2015neural
%A Sonoda, Sho
%A Murata, Noboru
%D 2015
%K approximate deep-learning learning readings theory
%R 10.1016/j.acha.2015.12.005
%T Neural Network with Unbounded Activation Functions is Universal
Approximator
%U http://arxiv.org/abs/1505.03654
%X This paper presents an investigation of the approximation property of neural
networks with unbounded activation functions, such as the rectified linear unit
(ReLU), which is the new de-facto standard of deep learning. The ReLU network
can be analyzed by the ridgelet transform with respect to Lizorkin
distributions. By showing three reconstruction formulas by using the Fourier
slice theorem, the Radon transform, and Parseval's relation, it is shown that a
neural network with unbounded activation functions still satisfies the
universal approximation property. As an additional consequence, the ridgelet
transform, or the backprojection filter in the Radon domain, is what the
network learns after backpropagation. Subject to a constructive admissibility
condition, the trained network can be obtained by simply discretizing the
ridgelet transform, without backpropagation. Numerical examples not only
support the consistency of the admissibility condition but also imply that some
non-admissible cases result in low-pass filtering.
@article{sonoda2015neural,
  abstract      = {This paper presents an investigation of the approximation property of neural
networks with unbounded activation functions, such as the rectified linear unit
(ReLU), which is the new de-facto standard of deep learning. The ReLU network
can be analyzed by the ridgelet transform with respect to Lizorkin
distributions. By showing three reconstruction formulas by using the Fourier
slice theorem, the Radon transform, and Parseval's relation, it is shown that a
neural network with unbounded activation functions still satisfies the
universal approximation property. As an additional consequence, the ridgelet
transform, or the backprojection filter in the Radon domain, is what the
network learns after backpropagation. Subject to a constructive admissibility
condition, the trained network can be obtained by simply discretizing the
ridgelet transform, without backpropagation. Numerical examples not only
support the consistency of the admissibility condition but also imply that some
non-admissible cases result in low-pass filtering.},
  added-at      = {2020-01-29T17:05:23.000+0100},
  archiveprefix = {arXiv},
  author        = {Sonoda, Sho and Murata, Noboru},
  biburl        = {https://www.bibsonomy.org/bibtex/2ba938def8feb259f4e1dd560551bf880/kirk86},
  description   = {[1505.03654] Neural Network with Unbounded Activation Functions is Universal Approximator},
  doi           = {10.1016/j.acha.2015.12.005},
  eprint        = {1505.03654},
  interhash     = {d1ad66e96c0c8cc8d6b0574c854eaf87},
  intrahash     = {ba938def8feb259f4e1dd560551bf880},
  journal       = {Applied and Computational Harmonic Analysis},
  keywords      = {approximate deep-learning learning readings theory},
  note          = {arXiv comment: under review; first revised version},
  timestamp     = {2020-01-29T17:05:23.000+0100},
  title         = {Neural Network with Unbounded Activation Functions is Universal Approximator},
  url           = {http://arxiv.org/abs/1505.03654},
  year          = {2015}
}