Recent work in scientific machine learning has developed so-called
physics-informed neural network (PINN) models. The typical approach is to
incorporate physical domain knowledge as soft constraints on an empirical loss
function and use existing machine learning methodologies to train the model. We
demonstrate that, while existing PINN methodologies can learn good models for
relatively trivial problems, they can easily fail to learn relevant physical
phenomena for even slightly more complex problems. In particular, we analyze
several distinct situations of widespread physical interest, including learning
differential equations with convection, reaction, and diffusion operators. We
provide evidence that the soft regularization in PINNs, which involves
PDE-based differential operators, can introduce a number of subtle problems,
including making the problem more ill-conditioned. Importantly, we show that
these possible failure modes are not due to the lack of expressivity in the NN
architecture, but that the PINN's setup makes the loss landscape very hard to
optimize. We then describe two promising solutions to address these failure
modes. The first approach is to use curriculum regularization, where the PINN's
loss term starts from a simple PDE regularization, and becomes progressively
more complex as the NN gets trained. The second approach is to pose the problem
as a sequence-to-sequence learning task, rather than learning to predict the
entire space-time at once. Extensive testing shows that we can achieve up to
1-2 orders of magnitude lower error with these methods as compared to regular
PINN training.
%0 Generic
%1 krishnapriyan2021characterizing
%A Krishnapriyan, Aditi S.
%A Gholami, Amir
%A Zhe, Shandian
%A Kirby, Robert M.
%A Mahoney, Michael W.
%D 2021
%K failure neuralpde pinn todo:read
%T Characterizing possible failure modes in physics-informed neural
networks
%U http://arxiv.org/abs/2109.01050
%X Recent work in scientific machine learning has developed so-called
physics-informed neural network (PINN) models. The typical approach is to
incorporate physical domain knowledge as soft constraints on an empirical loss
function and use existing machine learning methodologies to train the model. We
demonstrate that, while existing PINN methodologies can learn good models for
relatively trivial problems, they can easily fail to learn relevant physical
phenomena for even slightly more complex problems. In particular, we analyze
several distinct situations of widespread physical interest, including learning
differential equations with convection, reaction, and diffusion operators. We
provide evidence that the soft regularization in PINNs, which involves
PDE-based differential operators, can introduce a number of subtle problems,
including making the problem more ill-conditioned. Importantly, we show that
these possible failure modes are not due to the lack of expressivity in the NN
architecture, but that the PINN's setup makes the loss landscape very hard to
optimize. We then describe two promising solutions to address these failure
modes. The first approach is to use curriculum regularization, where the PINN's
loss term starts from a simple PDE regularization, and becomes progressively
more complex as the NN gets trained. The second approach is to pose the problem
as a sequence-to-sequence learning task, rather than learning to predict the
entire space-time at once. Extensive testing shows that we can achieve up to
1-2 orders of magnitude lower error with these methods as compared to regular
PINN training.
@misc{krishnapriyan2021characterizing,
  abstract      = {Recent work in scientific machine learning has developed so-called
physics-informed neural network (PINN) models. The typical approach is to
incorporate physical domain knowledge as soft constraints on an empirical loss
function and use existing machine learning methodologies to train the model. We
demonstrate that, while existing PINN methodologies can learn good models for
relatively trivial problems, they can easily fail to learn relevant physical
phenomena for even slightly more complex problems. In particular, we analyze
several distinct situations of widespread physical interest, including learning
differential equations with convection, reaction, and diffusion operators. We
provide evidence that the soft regularization in PINNs, which involves
PDE-based differential operators, can introduce a number of subtle problems,
including making the problem more ill-conditioned. Importantly, we show that
these possible failure modes are not due to the lack of expressivity in the NN
architecture, but that the PINN's setup makes the loss landscape very hard to
optimize. We then describe two promising solutions to address these failure
modes. The first approach is to use curriculum regularization, where the PINN's
loss term starts from a simple PDE regularization, and becomes progressively
more complex as the NN gets trained. The second approach is to pose the problem
as a sequence-to-sequence learning task, rather than learning to predict the
entire space-time at once. Extensive testing shows that we can achieve up to
1-2 orders of magnitude lower error with these methods as compared to regular
PINN training.},
  added-at      = {2023-07-21T17:55:47.000+0200},
  archiveprefix = {arXiv},
  author        = {Krishnapriyan, Aditi S. and Gholami, Amir and Zhe, Shandian and Kirby, Robert M. and Mahoney, Michael W.},
  biburl        = {https://www.bibsonomy.org/bibtex/2c5301ba9db42fad0ab54bf9b03dd355b/annakrause},
  description   = {2109.01050.pdf},
  eprint        = {2109.01050},
  interhash     = {9b6d3ea4620b6bc879b040950ec5f5dd},
  intrahash     = {c5301ba9db42fad0ab54bf9b03dd355b},
  keywords      = {failure neuralpde pinn todo:read},
  note          = {22 pages},
  timestamp     = {2023-07-21T17:55:47.000+0200},
  title         = {Characterizing possible failure modes in physics-informed neural
networks},
  url           = {http://arxiv.org/abs/2109.01050},
  year          = {2021}
}