abstract = {Deep neural networks have been recently shown to capture intricate information transformation of signals from the sensory profiles to semantic representations that facilitate recognition or discrimination of complex stimuli. In this vein, convolution neural networks (CNNs) have been used very successfully in image and audio classification. Designed to imitate the hierarchical structure of the nervous system, CNNs reflect activation with increasing degrees of complexity that transform the incoming signal onto object-level representations. In this work, we employ a CNN trained for large-scale object classification to gain insights about the contribution of various audio representations that guide sound perception. The analysis contrasts activation of different layers of a convolutional neural network with acoustic features extracted directly from the scenes, perceptual salience obtained from behavioral responses of human listeners, as well as neural oscillations recorded by Electroencephalography (EEG) in response to the same natural scenes. All three measures are tightly linked quantities believed to guide percepts of salience and object formation when listening to complex scenes. The results paint a picture of the intricate interplay between low-level and object-level representations in guiding auditory salience that is very much dependent on context and sound category.},
author = {Huang, Nicholas and Slaney, Malcolm and Elhilali, Mounya},
doi = {10.3389/fnins.2018.00532},
issn = {1662-453X},
journal = {Frontiers in Neuroscience},
keywords = {Audio classification,Auditory salience,Convolutional Neural Network,Electroencephalography,deep learning,natural scenes},
pages = {532},
title = {Connecting Deep Neural Networks to Physical, Perceptual, and Electrophysiological Auditory Signals},
url = {https://www.frontiersin.org/article/10.3389/fnins.2018.00532/full},
volume = {12},
year = {2018}