@inproceedings{9bed3eae868b4f3f88c3b0c06370bf10,
title = "Predicting Emotions Perceived from Sounds",
abstract = "Sonification is the science of communication of data and events to users through sounds. Auditory icons, earcons, and speech are the common auditory display schemes utilized in sonification, or more specifically in the use of audio to convey information. Once the captured data are perceived, their meanings, and more importantly, intentions can be interpreted more easily and thus can be employed as a complement to visualization techniques. Through auditory perception it is possible to convey information related to temporal, spatial, or some other context-oriented information. An important research question is whether the emotions perceived from these auditory icons or earcons are predictable in order to build an automated sonification platform. This paper conducts an experiment through which several mainstream and conventional machine learning algorithms are developed to study the prediction of emotions perceived from sounds. To do so, the key features of sounds are captured and then are modeled using machine learning algorithms using feature reduction techniques. We observe that it is possible to predict perceived emotions with high accuracy. In particular, the regression based on Random Forest demonstrated its superiority compared to other machine learning algorithms.",
keywords = "Emo-Soundscape, Emotion prediction, machine learning, perceived emotion, sound",
author = "Faranak Abri and Gutierrez, {Luis Felipe} and {Siami Namin}, Akbar and Sears, {David R.W.} and Jones, {Keith S.}",
note = "Funding Information: This study is the first step towards modeling emotions perceived from sounds and through arousal and valence. Additional studies are needed to investigate whether other types of sound features other than acoustic features, could be also useful for prediction purposes. Furthermore, this paper explored conventional machine learning algorithms. The emerging approaches in deep learning analysis might provide better predictions for perceived emotions. 7 frct802n-12cE This research work is supported by National Science Foundation (NSF) under Grant No: 1564293. Publisher Copyright: {\textcopyright} 2020 IEEE.; null ; Conference date: 10-12-2020 Through 13-12-2020",
year = "2020",
month = dec,
day = "10",
doi = "10.1109/BigData50022.2020.9377842",
language = "English",
series = "Proceedings - 2020 IEEE International Conference on Big Data, Big Data 2020",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "2057--2064",
editor = "Xintao Wu and Chris Jermaine and Li Xiong and Hu, {Xiaohua Tony} and Olivera Kotevska and Siyuan Lu and Weijia Xu and Srinivas Aluru and Chengxiang Zhai and Eyhab Al-Masri and Zhiyuan Chen and Jeff Saltz",
booktitle = "Proceedings - 2020 IEEE International Conference on Big Data, Big Data 2020",
}