@misc{zhang2023samsurvey,
  title         = {A Survey on {Segment Anything Model} ({SAM}): Vision Foundation Model Meets Prompt Engineering},
  author        = {Zhang and Puspitasari and Zheng and Li and Qiao and Kang and Shan and Zhang and Qin and Rameau and others},
  year          = {2023},
  month         = jul,
  eprint        = {2306.06211},
  archiveprefix = {arXiv},
  primaryclass  = {cs.CV},
  abstract      = {Segment anything model (SAM) developed by Meta AI Research has recently attracted significant attention. Trained on a large segmentation dataset of over 1 billion masks, SAM is capable of segmenting any object on a certain image. In the original SAM work, the authors turned to zero-short transfer tasks (like edge detection) for evaluating the performance of SAM. Recently, numerous works have attempted to investigate the performance of SAM in various scenarios to recognize and segment objects. Moreover, numerous projects have emerged to show the versatility of SAM as a foundation model by combining it with other models, like Grounding DINO, Stable Diffusion, ChatGPT, etc. With the relevant papers and projects increasing exponentially, it is challenging for the readers to catch up with the development of SAM. To this end, this work conducts the first yet comprehensive survey on SAM. This is an ongoing project and we intend to update the manuscript on a regular basis. Therefore, readers are welcome to contact us if they complete new works related to SAM so that we can include them in our next version.},
  internal-note = {Review notes: (1) imported record carried surnames only -- recover given names from the arXiv page; (2) arXiv id 2306.06211 added from reviewer knowledge, not the source record -- verify; (3) original Zotero key contained spaces/periods and was replaced, update any \cite commands accordingly.},
}