@Article{CiCP-36-1090,
  author = {Wang, Zhiwei and Zhang, Lulu and Zhang, Zhongwang and Xu, Zhi-Qin John},
  title = {Loss Jump During Loss Switch in Solving PDEs with Neural Networks},
  journal = {Communications in Computational Physics},
  year = {2024},
  volume = {36},
  number = {4},
  pages = {1090--1112},
  abstract = {
Using neural networks to solve partial differential equations (PDEs) is gaining popularity as an alternative approach in the scientific computing community. Neural networks can integrate different types of information into the loss function, including observation data, governing equations, and variational forms. These loss functions fall broadly into two types: the observation data loss directly constrains and measures the model output, whereas other loss functions constrain the network output only indirectly and can be classified as model loss. However, the underlying mechanisms of this approach, including its theoretical foundations and a rigorous characterization of various phenomena, are not yet thoroughly understood. This work investigates how different loss functions affect the training of neural networks for solving PDEs. We discover a stable loss-jump phenomenon: when the loss function is switched from the data loss to the model loss, which involves derivative information of different orders, the neural network solution immediately deviates significantly from the exact solution. Further experiments reveal that this phenomenon arises from the different frequency preferences of neural networks under different loss functions. We theoretically analyze the frequency preference of neural networks under the model loss. This loss-jump phenomenon provides a valuable perspective for examining the underlying mechanisms of neural networks in solving PDEs.
  },
  issn = {1991-7120},
  doi = {10.4208/cicp.OA-2024-0096},
  url = {http://global-sci.org/intro/article_detail/cicp/23487.html}
}
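
A minimal PyTorch sketch of the loss switch the abstract describes, kept outside the entry so the file still parses as BibTeX. It is not taken from the paper: the 1D Poisson test problem, network architecture, optimizer, and step counts are illustrative assumptions. A network is first trained on an observation data loss, then switched to a model loss built from the PDE residual (which involves second-order derivatives), the point at which the reported loss jump appears.

import torch

torch.manual_seed(0)

# Illustrative (assumed) test problem: u''(x) = f(x) on [0, 1], u(0) = u(1) = 0,
# with exact solution u(x) = sin(pi x), so f(x) = -pi^2 sin(pi x).
def u_exact(x):
    return torch.sin(torch.pi * x)

def f_rhs(x):
    return -(torch.pi ** 2) * torch.sin(torch.pi * x)

net = torch.nn.Sequential(
    torch.nn.Linear(1, 64), torch.nn.Tanh(),
    torch.nn.Linear(64, 64), torch.nn.Tanh(),
    torch.nn.Linear(64, 1),
)
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
x = torch.linspace(0.0, 1.0, 128).unsqueeze(1)

def data_loss():
    # Data loss: directly fit observed values of the solution.
    return torch.mean((net(x) - u_exact(x)) ** 2)

def model_loss():
    # Model loss: PDE residual, involving second-order derivatives of the
    # network output, plus a penalty enforcing the boundary conditions.
    xr = x.clone().requires_grad_(True)
    u = net(xr)
    du = torch.autograd.grad(u, xr, torch.ones_like(u), create_graph=True)[0]
    d2u = torch.autograd.grad(du, xr, torch.ones_like(du), create_graph=True)[0]
    residual = torch.mean((d2u - f_rhs(xr)) ** 2)
    boundary = torch.mean(net(torch.tensor([[0.0], [1.0]])) ** 2)
    return residual + boundary

# Phase 1 trains on the data loss; phase 2 switches to the model loss.
for phase, loss_fn in [("data", data_loss), ("model", model_loss)]:
    for step in range(2000):
        opt.zero_grad()
        loss_fn().backward()
        opt.step()
    err = torch.max(torch.abs(net(x) - u_exact(x))).item()
    print(f"after {phase}-loss phase: max |u_net - u_exact| = {err:.3e}")

Logging the error against the exact solution every few steps of the second phase, rather than only at its end, is what would expose the immediate deviation right after the switch.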