@article{scholars17131,
  author    = {Talpur, N. and Abdulkadir, S. J. and Alhussian, H. and Hasan, M. H. and Aziz, N. and Bamhdi, A.},
  title     = {A comprehensive review of deep neuro-fuzzy system architectures and their optimization methods},
  journal   = {Neural Computing and Applications},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  year      = {2022},
  volume    = {34},
  number    = {3},
  pages     = {1837--1875},
  doi       = {10.1007/s00521-021-06807-9},
  issn      = {0941-0643},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85122803339&doi=10.1007\%2fs00521-021-06807-9&partnerID=40&md5=115fc4364c86240d29b72de307395c23},
  note      = {Cited by 21},
  keywords  = {Deep neural networks; Fuzzy inference; Fuzzy neural networks; Fuzzy systems; Memory architecture; Network architecture; Optimization, Deep neuro-fuzzy system; Derivative-based optimization; Derivative-free optimization; Fuzzy inference systems; Fuzzy system modeling; Metaheuristic; Neuro-fuzzy system architecture; Neurofuzzy system; Optimisations; Optimization method, Heuristic algorithms},
  abstract  = {Deep neuro-fuzzy systems (DNFSs) have been successfully applied to real-world problems by combining the efficient learning process of deep neural networks (DNNs) with the reasoning aptitude of fuzzy inference systems (FIS). This study provides a comprehensive review of DNFSs, divided into two essential parts. The first part aims to provide a thorough understanding of DNFSs and their architectural representations, whereas the second part reviews DNFS optimization methods. This study aims to assist researchers in understanding the various ways DNFS models are developed by hybridizing DNNs and FIS, as well as gradient-based (derivative) and metaheuristic (derivative-free) optimization methods, as discussed in the literature. This study revealed that the proposed DNFS architectures performed 11.6\% better than non-fuzzy models, with an overall accuracy of 81.4\%. The investigation based on optimization methods revealed that DNFSs with metaheuristic optimization methods showed an overall accuracy of 93.56\%, which is 21.10\% higher than that of DNFS models using gradient-based methods. Additionally, this study showed that the DNFS networks presented in the literature integrate DNNs with typical FIS, although more satisfactory results can be obtained using a new generation of FIS termed fractional FIS (FFIS) and Mamdani complex FIS (M-CFIS). Moreover, dynamic neural networks are suggested in place of static DNNs to facilitate dynamic learning. Some studies have also demonstrated the optimization of DNFSs using classical gradient-based approaches, which can degrade network performance when solving highly nonlinear problems. This study suggests implementing optimization methods with new and improved metaheuristics to enhance the training and performance of the models. {\copyright} 2021, The Author(s), under exclusive licence to Springer-Verlag London Ltd., part of Springer Nature.}
}