@Article{CSIAM-AM-1-296, author = {Dai, Yu-Hong and Zhang, Liwei}, title = {Optimality Conditions for Constrained Minimax Optimization}, journal = {CSIAM Transactions on Applied Mathematics}, year = {2020}, volume = {1}, number = {2}, pages = {296--315}, abstract = {

Minimax optimization problems arise both from modern machine learning, including generative adversarial networks, adversarial training, and multi-agent reinforcement learning, and from traditional research areas such as saddle point problems, numerical partial differential equations, and optimality conditions of equality constrained optimization. For the unconstrained continuous nonconvex-nonconcave setting, Jin, Netrapalli and Jordan (2019) carefully considered the basic question of what constitutes a proper definition of local optima of a minimax optimization problem and proposed a notion of local optimality called local minimax. We extend the definition of local minimax points to constrained nonconvex-nonconcave minimax optimization problems. By analyzing the Jacobian uniqueness conditions for the lower-level maximization problem and the strong regularity of its Karush-Kuhn-Tucker conditions, we provide both necessary optimality conditions and sufficient optimality conditions for local minimax points of constrained minimax optimization problems.

}, issn = {2708-0579}, doi = {10.4208/csiam-am.2020-0014}, url = {http://global-sci.org/intro/article_detail/csiam-am/17180.html} }