@inproceedings{165,
  author    = {Scott Fujimoto and Herke van Hoof and David Meger},
  title     = {Addressing Function Approximation Error in Actor-Critic Methods},
  booktitle = {Proceedings of the 35th International Conference on Machine Learning},
  abstract  = {In value-based reinforcement learning methods such as deep Q-learning, function approximation errors are known to lead to overestimated value estimates and suboptimal policies. We show that this problem persists in an actor-critic setting and propose novel mechanisms to minimize its effects on both the actor and the critic. Our algorithm builds on Double Q-learning, by taking the minimum value between a pair of critics to limit overestimation. We draw the connection between target networks and overestimation bias, and suggest delaying policy updates to reduce per-update error and further improve performance. We evaluate our method on the suite of OpenAI gym tasks, outperforming the state of the art in every environment tested.},
  year      = {2018},
  volume    = {80},
  pages     = {1587--1596},
  month     = jul,
  publisher = {PMLR},
  address   = {Stockholmsmässan, Stockholm, Sweden},
  url       = {http://proceedings.mlr.press/v80/fujimoto18a.html},
}
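The abstract names two mechanisms: taking the minimum over a pair of critics to limit overestimation, and delaying policy updates relative to critic updates. The sketch below illustrates those two ideas only; it is not the authors' reference implementation. The network architectures, hyperparameters (gamma, policy_delay), optimizer setup, and the replay-batch layout are assumptions made for illustration, and target-network updates are omitted.

# Minimal sketch (assumptions noted above) of a clipped double-Q target and
# delayed policy updates, the two mechanisms described in the abstract.
import torch
import torch.nn as nn

class Critic(nn.Module):
    """Q(s, a) approximator; the architecture is a placeholder."""
    def __init__(self, state_dim, action_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim + action_dim, 64), nn.ReLU(),
            nn.Linear(64, 1),
        )

    def forward(self, state, action):
        return self.net(torch.cat([state, action], dim=-1))

class Actor(nn.Module):
    """Deterministic policy pi(s); the architecture is a placeholder."""
    def __init__(self, state_dim, action_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, 64), nn.ReLU(),
            nn.Linear(64, action_dim), nn.Tanh(),
        )

    def forward(self, state):
        return self.net(state)

def clipped_double_q_update(step, batch, actor, actor_target,
                            critic1, critic2, critic1_target, critic2_target,
                            actor_opt, critic_opt,
                            gamma=0.99, policy_delay=2):
    """One update step; `batch` is (state, action, reward, next_state, done) tensors."""
    state, action, reward, next_state, done = batch

    # Clipped double-Q target: take the minimum of the two target critics
    # so that the bootstrapped value is less prone to overestimation.
    with torch.no_grad():
        next_action = actor_target(next_state)
        target_q = torch.min(
            critic1_target(next_state, next_action),
            critic2_target(next_state, next_action),
        )
        y = reward + gamma * (1.0 - done) * target_q

    # Both critics regress toward the same clipped target.
    critic_loss = ((critic1(state, action) - y) ** 2).mean() \
                + ((critic2(state, action) - y) ** 2).mean()
    critic_opt.zero_grad()
    critic_loss.backward()
    critic_opt.step()

    # Delayed policy update: the actor (and, in the full method, the target
    # networks) is updated less frequently than the critics.
    if step % policy_delay == 0:
        actor_loss = -critic1(state, actor(state)).mean()
        actor_opt.zero_grad()
        actor_loss.backward()
        actor_opt.step()

Taking the element-wise minimum of the two critic estimates biases the target toward underestimation rather than overestimation, while the policy_delay parameter spaces out actor updates so the policy is improved against a lower-variance value estimate.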