@article{scholars16521,
  author    = {Muzammel, M. and Yusoff, M. Z. and Saad, M. N. M. and Sheikh, F. and Awais, M. A.},
  title     = {Blind-Spot Collision Detection System for Commercial Vehicles Using Multi Deep CNN Architecture},
  journal   = {Sensors},
  volume    = {22},
  number    = {16},
  year      = {2022},
  publisher = {MDPI},
  issn      = {1424-8220},
  doi       = {10.3390/s22166088},
  note      = {cited by 0},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85136722733&doi=10.3390\%2fs22166088&partnerID=40&md5=d49a4d68c4bee1b783e0d668a287027a},
  keywords  = {Accidents; Buses; Commercial vehicles; Convolutional neural networks; Deep learning; Feature extraction; Motor transportation; Network architecture; Object recognition; Roads and streets; Vehicle safety, Blind spot collision detection for bus; Blind spot vehicle detection; Blind spots; Collision detection; Collision detection system; Convolutional neural network; Deep convolutional neural network architecture; Deep learning model; Detection system; Heavier vehicles; Heavy vehicle safety; Learning models; Neural network architecture; Road safety; Vehicle safety; Vehicles detection, Object detection, car; motor vehicle, Automobiles; Motor Vehicles; Neural Networks, Computer},
  abstract  = {Buses and heavy vehicles have more blind spots than cars and other road vehicles due to their large size. Therefore, accidents caused by these heavy vehicles are more fatal and result in severe injuries to other road users. These possible blind-spot collisions can be identified early using vision-based object detection approaches. Yet, the existing state-of-the-art vision-based object detection models rely heavily on a single feature descriptor for making decisions. In this research, the design of two convolutional neural networks (CNNs) based on high-level feature descriptors and their integration with Faster R-CNN is proposed to detect blind-spot collisions for heavy vehicles. Moreover, a fusion approach is proposed to integrate two pre-trained networks (i.e., ResNet 50 and ResNet 101) for extracting high-level features for blind-spot vehicle detection. The fusion of features significantly improves the performance of Faster R-CNN and outperforms the existing state-of-the-art methods. Both approaches are validated on a self-recorded blind-spot vehicle detection dataset for buses and the online LISA dataset for vehicle detection. For the two proposed approaches, false detection rates (FDR) of 3.05 and 3.49 are obtained on the self-recorded dataset, making these approaches suitable for real-time applications. {\copyright} 2022 by the authors.}
}