Simulating physical systems is a core component of scientific computing,
encompassing a wide range of physical domains and applications. Recently, there
has been a surge in data-driven methods to complement traditional numerical
simulations methods, motivated by the opportunity to reduce computational costs
and/or learn new physical models leveraging access to large collections of
data. However, the diversity of problem settings and applications has led to a
plethora of approaches, each one evaluated on a different setup and with
different evaluation metrics. We introduce a set of benchmark problems to take
a step towards unified benchmarks and evaluation protocols. We propose four
representative physical systems, as well as a collection of both widely used
classical time integrators and representative data-driven methods
(kernel-based, MLP, CNN, nearest neighbors). Our framework allows evaluating
objectively and systematically the stability, accuracy, and computational
efficiency of data-driven methods. Additionally, it is configurable to permit
adjustments for accommodating other learning tasks and for establishing a
foundation for future developments in machine learning for scientific
computing.
Description
An Extensible Benchmark Suite for Learning to Simulate Physical Systems - 2108.07799.pdf
%0 Generic
%1 otness2021extensible
%A Otness, Karl
%A Gjoka, Arvi
%A Bruna, Joan
%A Panozzo, Daniele
%A Peherstorfer, Benjamin
%A Schneider, Teseo
%A Zorin, Denis
%D 2021
%K benchmark dataset neuralpde todo:read
%T An Extensible Benchmark Suite for Learning to Simulate Physical Systems
%U http://arxiv.org/abs/2108.07799
%X Simulating physical systems is a core component of scientific computing,
encompassing a wide range of physical domains and applications. Recently, there
has been a surge in data-driven methods to complement traditional numerical
simulations methods, motivated by the opportunity to reduce computational costs
and/or learn new physical models leveraging access to large collections of
data. However, the diversity of problem settings and applications has led to a
plethora of approaches, each one evaluated on a different setup and with
different evaluation metrics. We introduce a set of benchmark problems to take
a step towards unified benchmarks and evaluation protocols. We propose four
representative physical systems, as well as a collection of both widely used
classical time integrators and representative data-driven methods
(kernel-based, MLP, CNN, nearest neighbors). Our framework allows evaluating
objectively and systematically the stability, accuracy, and computational
efficiency of data-driven methods. Additionally, it is configurable to permit
adjustments for accommodating other learning tasks and for establishing a
foundation for future developments in machine learning for scientific
computing.
@misc{otness2021extensible,
abstract = {Simulating physical systems is a core component of scientific computing,
encompassing a wide range of physical domains and applications. Recently, there
has been a surge in data-driven methods to complement traditional numerical
simulations methods, motivated by the opportunity to reduce computational costs
and/or learn new physical models leveraging access to large collections of
data. However, the diversity of problem settings and applications has led to a
plethora of approaches, each one evaluated on a different setup and with
different evaluation metrics. We introduce a set of benchmark problems to take
a step towards unified benchmarks and evaluation protocols. We propose four
representative physical systems, as well as a collection of both widely used
classical time integrators and representative data-driven methods
(kernel-based, MLP, CNN, nearest neighbors). Our framework allows evaluating
objectively and systematically the stability, accuracy, and computational
efficiency of data-driven methods. Additionally, it is configurable to permit
adjustments for accommodating other learning tasks and for establishing a
foundation for future developments in machine learning for scientific
computing.},
added-at = {2023-12-05T22:15:48.000+0100},
author = {Otness, Karl and Gjoka, Arvi and Bruna, Joan and Panozzo, Daniele and Peherstorfer, Benjamin and Schneider, Teseo and Zorin, Denis},
biburl = {https://www.bibsonomy.org/bibtex/2b6ad5a4e264bcb1c873e6f964f5164f6/annakrause},
description = {An Extensible Benchmark Suite for Learning to Simulate Physical Systems - 2108.07799.pdf},
eprint = {2108.07799},
eprinttype = {arXiv},
interhash = {6074cc311b5a8a30383b7bef425c0b34},
intrahash = {b6ad5a4e264bcb1c873e6f964f5164f6},
keywords = {benchmark dataset neuralpde todo:read},
note = {Accepted to NeurIPS 2021 track on datasets and benchmarks},
timestamp = {2023-12-13T20:15:18.000+0100},
title = {An Extensible Benchmark Suite for Learning to Simulate Physical Systems},
url = {http://arxiv.org/abs/2108.07799},
year = 2021
}