@inproceedings{nemala2012ICASSP,
abstract = {In the real world, natural conversational speech is an amalgam of speech segments, silences, and environmental/background and channel effects. Labeling the different regions of an acoustic signal according to their information levels would greatly benefit all automatic speech processing tasks. In the current work, we propose a novel segmentation approach based on a perception-based measure of speech intelligibility. Unlike segmentation approaches based on various forms of voice-activity detection (VAD), the proposed parsing approach exploits higher-level perceptual information about signal intelligibility levels. This labeling information is integrated into a novel multilevel framework for the automatic speaker recognition task. The system processes the input acoustic signal along independent streams reflecting various levels of intelligibility and then fuses the decision scores from the multiple streams according to their intelligibility contribution. Our results show that the proposed system achieves significant improvements over standard baseline and VAD-based approaches, and attains a performance similar to the one obtained with oracle speech segmentation information. {\textcopyright} 2012 IEEE.},
author = {Nemala, Sridhar Krishna and Elhilali, Mounya},
booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
doi = {10.1109/ICASSP.2012.6288893},
isbn = {978-1-4673-0046-9},
issn = {1520-6149},
keywords = {Noise robustness,Speaker recognition,Speech intelligibility,Voice-activity detection},
pages = {4393--4396},
title = {{Multilevel speech intelligibility for robust speaker recognition}},
url = {http://ieeexplore.ieee.org/document/6288893/},
year = {2012}
}