@inproceedings{GarciaRomero2012,
abstract = {In recent years, there have been significant advances in the field of speaker recognition that have resulted in very robust recognition systems. The primary focus of many recent developments has shifted to the problem of recognizing speakers in adverse conditions, e.g., in the presence of noise/reverberation. In this paper, we present the UMD-JHU speaker recognition system applied to the NIST 2010 SRE task. The novel aspects of our system are: 1) Improved performance on trials involving different vocal effort via the use of linear-scale features; 2) Expected improved recognition performance in the presence of reverberation and noise via the use of frequency domain perceptual linear predictor and cortical features; 3) A new discriminative kernel partial least squares (KPLS) framework that complements state-of-the-art back-end systems JFA and PLDA to aid in better overall recognition; and 4) Acceleration of JFA, PLDA and KPLS back-ends via distributed computing. The individual components of the system and the fused system are compared against a baseline JFA system and results reported by SRI and MIT-LL on SRE2010. {\textcopyright} 2012 IEEE.},
author = {Garcia-Romero, D. and Zhou, X. and Zotkin, D. and Srinivasan, B. and Luo, Y. and Ganapathy, S. and Thomas, S. and Nemala, S. and Sivaram, G. S. V. S. and Mirbagheri, M. and Mallidi, S. H. and Janu, T. and Rajan, P. and Mesgarani, N. and Elhilali, M. and Hermansky, H. and Shamma, S. and Duraiswami, R.},
booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
doi = {10.1109/ICASSP.2012.6288852},
isbn = {978-1-4673-0046-9},
issn = {1520-6149},
keywords = {Cortical,FDLP,JFA,KPLS,LFCC,NIST SRE 2010,PLDA,Speaker recognition},
pages = {4229--4232},
title = {{The UMD-JHU 2011 speaker recognition system}},
url = {http://ieeexplore.ieee.org/document/6288852/},
year = {2012}
}