@inproceedings{Slaney2012,
  author    = {Slaney, Malcolm and Agus, Trevor and Liu, Shih-Chii and Kaya, Merve and Elhilali, Mounya},
  title     = {A Model of Attention-Driven Scene Analysis},
  booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  year      = {2012},
  pages     = {145--148},
  doi       = {10.1109/ICASSP.2012.6287838},
  isbn      = {978-1-4673-0046-9},
  issn      = {1520-6149},
  url       = {http://ieeexplore.ieee.org/document/6287838/},
  keywords  = {Attention,Auditory Scene Analysis,Cognition,Digit Recognition,Saliency},
  abstract  = {Parsing complex acoustic scenes involves an intricate interplay between bottom-up, stimulus-driven salient elements in the scene with top-down, goal-directed, mechanisms that shift our attention to particular parts of the scene. Here, we present a framework for exploring the interaction between these two processes in a simulated cocktail party setting. The model shows improved digit recognition in a multi-talker environment with a goal of tracking the source uttering the highest value. This work highlights the relevance of both data-driven and goal-driven processes in tackling real multi-talker, multi-source sound analysis.}
}