abstract = {Our current understanding of how the brain segregates auditory scenes into meaningful objects is in line with a Gestaltism framework. These Gestalt principles suggest a theory of how different attributes of the soundscape are extracted then bound together into separate groups that reflect different objects or streams present in the scene. These cues are thought to reflect the underlying statistical structure of natural sounds in a similar way that statistics of natural images are closely linked to the principles that guide figure-ground segregation and object segmentation in vision. In the present study, we leverage inference in stochastic neural networks to learn emergent grouping cues directly from natural soundscapes including speech, music and sounds in nature. The model learns a hierarchy of local and global spectro-temporal attributes reminiscent of simultaneous and sequential Gestalt cues that underlie the organization of auditory scenes. These mappings operate at multiple time scales to analyze an incoming complex scene and are then fused using a Hebbian network that binds together coherent features into perceptually-segregated auditory objects. The proposed architecture successfully emulates a wide range of well established auditory scene segregation phenomena and quantifies the complementary role of segregation and binding cues in driving auditory scene segregation.},
author = {Chakrabarty, Debmalya and Elhilali, Mounya},
doi = {10.1371/journal.pcbi.1006711},
editor = {Theunissen, Fr{\'{e}}d{\'{e}}ric E.},
issn = {1553-7358},
journal = {{PLOS} Computational Biology},
number = {1},
pages = {e1006711},
title = {A {Gestalt} Inference Model for Auditory Scene Segregation},
url = {https://doi.org/10.1371/journal.pcbi.1006711},
volume = {15},
year = {2019}