@inproceedings{thakkar2024icassp,
abstract = {Auditory Attention Decoding (AAD) algorithms play a crucial role in isolating desired sound sources in challenging acoustic environments directly from brain activity. Although recent research has shown promise for AAD using shallow representations such as the auditory envelope and spectrogram, deep Self-Supervised (SS) representations have seen limited exploration at scale. In this study, we undertake a comprehensive investigation of the performance of linear decoders across 12 deep and 2 shallow representations, applied to EEG data from multiple studies spanning 57 subjects and multiple languages. Our experimental results consistently reveal the superiority of deep features for decoding the background speaker in AAD, regardless of dataset and analysis window. This result points to a possible nonlinear encoding of unattended signals in the brain that is revealed by deep nonlinear features. Additionally, we analyze the impact of different SS-representation layers and window sizes on AAD performance. These findings underscore the potential of integrating deep feature representations to enhance EEG-based AAD systems.},
author = {Thakkar, Karan and Hai, Jiarui and Elhilali, Mounya},
booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
doi = {10.1109/ICASSP48485.2024.10448271},
isbn = {979-8-3503-4485-1},
pages = {1241--1245},
title = {{Investigating Self-Supervised Deep Representations for EEG-Based Auditory Attention Decoding}},
url = {https://ieeexplore.ieee.org/document/10448271/},
year = {2024}
}