The purpose of this paper is to investigate neural network capability systematically. The main results are: 1) every Tauber-Wiener function is qualified as an activation function in the hidden layer of a three-layered neural network; 2) for a continuous function in S'(R/sup 1/) to be a Tauber-Wiener function, the necessary and sufficient condition is that it is not a polynomial; 3) the capability of approximating nonlinear functionals defined on some compact set of a Banach space and nonlinear operators has been shown; and 4) the possibility by neural computation to approximate the output as a whole (not at a fixed point) of a dynamical system, thus identifying the system.
Description
Universal approximation to nonlinear operators by neural networks with arbitrary activation functions and its application to dynamical systems - IEEE Journals & Magazine
%0 Journal Article
%1 392253
%A Chen, Tianping
%A Chen, Hong
%D 1995
%J IEEE Transactions on Neural Networks
%K approximate deep-learning readings theory
%N 4
%P 911-917
%R 10.1109/72.392253
%T Universal approximation to nonlinear operators by neural networks with arbitrary activation functions and its application to dynamical systems
%U https://ieeexplore.ieee.org/document/392253
%V 6
%X The purpose of this paper is to investigate neural network capability systematically. The main results are: 1) every Tauber-Wiener function is qualified as an activation function in the hidden layer of a three-layered neural network; 2) for a continuous function in S'(R/sup 1/) to be a Tauber-Wiener function, the necessary and sufficient condition is that it is not a polynomial; 3) the capability of approximating nonlinear functionals defined on some compact set of a Banach space and nonlinear operators has been shown; and 4) the possibility by neural computation to approximate the output as a whole (not at a fixed point) of a dynamical system, thus identifying the system.
@article{392253,
  abstract    = {The purpose of this paper is to investigate neural network capability systematically. The main results are: 1) every Tauber-Wiener function is qualified as an activation function in the hidden layer of a three-layered neural network; 2) for a continuous function in S'(R/sup 1/) to be a Tauber-Wiener function, the necessary and sufficient condition is that it is not a polynomial; 3) the capability of approximating nonlinear functionals defined on some compact set of a Banach space and nonlinear operators has been shown; and 4) the possibility by neural computation to approximate the output as a whole (not at a fixed point) of a dynamical system, thus identifying the system.},
  added-at    = {2020-06-09T18:58:33.000+0200},
  author      = {Chen, Tianping and Chen, Hong},
  biburl      = {https://www.bibsonomy.org/bibtex/28c23373629a50e901de5401535c9be10/kirk86},
  description = {Universal approximation to nonlinear operators by neural networks with arbitrary activation functions and its application to dynamical systems - IEEE Journals \& Magazine},
  doi         = {10.1109/72.392253},
  interhash   = {a4469adfce7087b2808b056d6c034f3e},
  intrahash   = {8c23373629a50e901de5401535c9be10},
  issn        = {1941-0093},
  journal     = {IEEE Transactions on Neural Networks},
  keywords    = {approximate deep-learning readings theory},
  month       = jul,
  number      = 4,
  pages       = {911--917},
  timestamp   = {2020-06-09T18:58:33.000+0200},
  title       = {Universal approximation to nonlinear operators by neural networks with arbitrary activation functions and its application to dynamical systems},
  url         = {https://ieeexplore.ieee.org/document/392253},
  volume      = 6,
  year        = 1995
}