@article{fedorishin_mohan_jawade_setlur_govindaraju_2022,
  title        = {Hear The Flow: Optical Flow-Based Self-Supervised Visual Sound Source Localization},
  author       = {Fedorishin, Dennis and Mohan, Deen Dayal and Jawade, Bhavin and Setlur, Srirangaraj and Govindaraju, Venu},
  year         = {2022},
  month        = {Nov},
  abstractNote = {Learning to localize the sound source in videos without explicit annotations is a novel area of audio-visual research. Existing work in this area focuses on creating attention maps that capture the correlation between the two modalities in order to localize the source of the sound. In a video, the objects exhibiting movement are often the ones generating the sound. In this work, we capture this characteristic by modeling the optical flow in a video as a prior to better aid in localizing the sound source. We further demonstrate that the addition of flow-based attention substantially improves visual sound source localization. Finally, we benchmark our method on standard sound source localization datasets and achieve state-of-the-art performance on the SoundNet-Flickr and VGG Sound Source datasets. Code: https://github.com/denfed/heartheflow},
}