@inproceedings{scholars5724,
  author    = {Kudiri, K. M. and Said, A. M. and Nayan, M. Y.},
  title     = {Emotion Detection through Speech and Facial Expressions},
  booktitle = {Proceedings - 2014 International Conference on Computer Assisted System in Health, CASH 2014},
  year      = {2015},
  pages     = {26--31},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  isbn      = {9781479988228},
  doi       = {10.1109/CASH.2014.22},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962082444&doi=10.1109\%2fCASH.2014.22&partnerID=40&md5=cecbc5ea8f6d4b0773059954b1649b01},
  keywords  = {Bins; Face recognition; Support vector machines, Emotion detection; Facial Expressions; Frequency coefficient; Fusion techniques; Human machine interaction; Multimodal system; Subimages; System response time, Human computer interaction},
  abstract  = {Human machine interaction is one of the most burgeoning area of research in the field of information technology. To date a majority of research in this field has been conducted using unimodal and multimodal systems with asynchronous data. Because of the above, the improper synchronization, which has become a common problem, due to that, the system complexity increases and the system response time decreases. To counter this problem, a novel approach has been introduced to predict human emotions using human speech and facial expressions. The approach uses two feature vectors, namely, relative bin frequency coefficient (RBFC) and relative sub-image based coefficient (RSB) for speech and visual data respectively. Support vector machine with radial basis kernel is used for feature level classification based fusion technique between two modalities. The proposed novel approach has resulted in galvanizing results for a myriad of inputs and can be adapted to asynchronous data. {\copyright} 2014 IEEE.},
  note      = {cited By 4; Conference of 1st International Conference on Computer Assisted System in Health, CASH 2014 ; Conference Date: 19 December 2014 Through 21 December 2014; Conference Code:118343},
}