@Article{NM-16-193,
  author   = {H. M. Shao and W. Wu and W. B. Liu},
  title    = {Convergence of {BP} Algorithm for Training {MLP} with Linear Output},
  journal  = {Numerical Mathematics, a Journal of Chinese Universities},
  year     = {2007},
  volume   = {16},
  number   = {3},
  pages    = {193--202},
  abstract = {The capability of multilayer perceptrons (MLPs) to approximate continuous functions with arbitrary accuracy has been demonstrated over the past decades. The back propagation (BP) algorithm is the most popular learning algorithm for training MLPs. In this paper, a simple iteration formula is used to select the learning rate for each cycle of the training procedure, and a convergence result is presented for the BP algorithm for training an MLP with one hidden layer and a linear output unit. The monotonicity of the error function during the training iterations is also guaranteed.},
  url      = {http://global-sci.org/intro/article_detail/nm/8053.html}
}
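
The following is a minimal Python sketch of the setting the abstract describes: BP training of an MLP with one hidden layer and a single linear output unit, where the learning rate is re-selected at each training cycle so that the error function decreases monotonically. The halving rule used below, the tanh hidden units, and the toy data are assumptions for illustration only; the abstract does not give the paper's actual iteration formula.

import numpy as np

rng = np.random.default_rng(0)

# Toy regression data (assumed for illustration).
X = rng.standard_normal((200, 3))
y = np.sin(X[:, 0]) + 0.5 * X[:, 1]

n_hidden = 10
W1 = rng.standard_normal((3, n_hidden)) * 0.1   # input -> hidden weights
W2 = rng.standard_normal(n_hidden) * 0.1        # hidden -> linear output weights

def forward(W1, W2):
    H = np.tanh(X @ W1)          # hidden-layer activations
    return H, H @ W2             # linear output unit: no output nonlinearity

def error(W1, W2):
    _, out = forward(W1, W2)
    return 0.5 * np.mean((out - y) ** 2)

eta = 1.0                        # initial learning rate
for cycle in range(500):
    H, out = forward(W1, W2)
    r = (out - y) / len(y)                          # averaged residual
    g2 = H.T @ r                                    # gradient w.r.t. W2
    g1 = X.T @ (np.outer(r, W2) * (1.0 - H ** 2))   # gradient w.r.t. W1 (tanh')
    e0 = error(W1, W2)
    # Select this cycle's learning rate by halving until the error decreases;
    # this stands in for the paper's (unspecified here) iteration formula and
    # enforces the monotone decrease of the error that the paper guarantees.
    while error(W1 - eta * g1, W2 - eta * g2) > e0 and eta > 1e-12:
        eta *= 0.5
    W1 -= eta * g1
    W2 -= eta * g2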