abstract = {To understand our surroundings, we effortlessly parse our sound environment into sound sources, extracting invariant information—or regularities—over time to build an internal representation of the world around us. Previous experimental work has shown the brain is sensitive to many types of regularities in sound, but theoretical models that capture underlying principles of regularity tracking across diverse sequence structures have been few and far between. Existing efforts often focus on sound patterns rather than the stochastic nature of sequences. In the current study, we employ a perceptual model for regularity extraction based on a Bayesian framework that posits the brain collects statistical information over time. We show this model can be used to simulate various results from the literature with stimuli exhibiting a wide range of predictability. This model can provide a useful tool for both interpreting existing experimental results under a unified model and providing predictions for new ones using more complex stimuli.},
author = {Skerritt-Davis, Benjamin and Elhilali, Mounya},
doi = {10.3813/AAA.919279},
issn = {1610-1928},
journal = {Acta Acustica united with Acustica},
number = {1},
pages = {1--4},
title = {A Model for Statistical Regularity Extraction from Dynamic Sounds},
url = {https://www.ingentaconnect.com/content/10.3813/AAA.919279},
volume = {105},
year = {2019}