@inproceedings{bb235200, AUTHOR = "Dong, Z.K. and Liu, X. and Chen, B. and Polak, P. and Zhang, P.", TITLE = "MuseChat: A Conversational Music Recommendation System for Videos", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "12775-12785", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230175"} @inproceedings{bb235201, AUTHOR = "Li, F. and Jiang, Q. and Zhang, H. and Ren, T. and Liu, S.L. and Zou, X. and Xu, H.Z. and Li, H.Y. and Yang, J.W. and Li, C.Y. and Zhang, L. and Gao, J.F.", TITLE = "Visual In-Context Prompting", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "12861-12871", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230176"} @inproceedings{bb235202, AUTHOR = "Sachdeva, R. and Zisserman, A.", TITLE = "The Manga Whisperer: Automatically Generating Transcriptions for Comics", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "12967-12976", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230177"} @inproceedings{bb235203, AUTHOR = "Du, Y.Y. and Wang, X.C. and Chen, C. and Ye, J. and Wang, Y. and Li, P. and Yan, M. and Zhang, J. and Huang, F. and Sui, Z.F. and Sun, M. and Liu, Y.", TITLE = "AdaMMS: Model Merging for Heterogeneous Multimodal Large Language Models with Unsupervised Coefficient Optimization", BOOKTITLE = CVPR25, YEAR = "2025", PAGES = "9413-9422", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230178"} @inproceedings{bb235204, AUTHOR = "Ye, Q.H. and Xu, H.Y. and Ye, J. and Yan, M. and Hu, A. and Liu, H. and Qian, Q. and Zhang, J. and Huang, F.", TITLE = "mPLUG-Owl2: Revolutionizing Multi-modal Large Language Model with Modality Collaboration", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13040-13051", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230179"} @inproceedings{bb235205, AUTHOR = "Qi, P. and Yan, Z. and Hsu, W. and Lee, M.L.", TITLE = "Sniffer: Multimodal Large Language Model for Explainable Out-of-Context Misinformation Detection", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13052-13062", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230180"} @inproceedings{bb235206, AUTHOR = "Zhong, S.S. and Huang, Z.Z. and Gao, S. and Wen, W. and Lin, L. and Zitnik, M. and Zhou, P.", TITLE = "Let's Think Outside the Box: Exploring Leap-of-Thought in Large Language Models with Creative Humor Generation", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13246-13257", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230181"} @inproceedings{bb235207, AUTHOR = "Gao, Z. and Du, Y.T. and Zhang, X.T. and Ma, X.J. and Han, W.J. and Zhu, S.C. and Li, Q.", TITLE = "CLOVA: A Closed-LOop Visual Assistant with Tool Usage and Update", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13258-13268", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230182"} @inproceedings{bb235208, AUTHOR = "Li, B. and Ge, Y.Y. and Ge, Y.X. and Wang, G.Z. and Wang, R. and Zhang, R.M. and Shan, Y.", TITLE = "SEED-Bench: Benchmarking Multimodal Large Language Models", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13299-13308", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230183"} @inproceedings{bb235209, AUTHOR = "Buettner, K. and Malakouti, S. and Li, X.L. 
and Kovashka, A.", TITLE = "Incorporating Geo-Diverse Knowledge into Prompting for Increased Geographical Robustness in Object Recognition", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13515-13524", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230184"} @inproceedings{bb235210, AUTHOR = "Liu, R. and Li, C. and Ge, Y.X. and Li, T.H. and Shan, Y. and Li, G.", TITLE = "BT-Adapter: Video Conversation is Feasible Without Video Instruction Tuning", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13658-13667", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230185"} @inproceedings{bb235211, AUTHOR = "Li, J.X. and Vo, D.M. and Sugimoto, A. and Nakayama, H.", TITLE = "Evcap: Retrieval-Augmented Image Captioning with External Visual-Name Memory for Open-World Comprehension", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13733-13742", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230186"} @inproceedings{bb235212, AUTHOR = "Song, L. and Chen, Y.K. and Yang, S. and Ding, X.H. and Ge, Y.X. and Chen, Y.C. and Shan, Y.", TITLE = "Low-Rank Approximation for Sparse Attention in Multi-Modal LLMs", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13763-13773", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230187"} @inproceedings{bb235213, AUTHOR = "Guo, Q. and de Mello, S. and Yin, H.X. and Byeon, W. and Cheung, K.C. and Yu, Y.Z. and Luo, P. and Liu, S.", TITLE = "RegionGPT: Towards Region Understanding Vision Language Model", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13796-13806", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230188"} @inproceedings{bb235214, AUTHOR = "Yu, T.Y. and Yao, Y. and Zhang, H.Y. and He, T. and Han, Y.F. and Cui, G. and Hu, J.Y. and Liu, Z.Y. and Zheng, H.T. and Sun, M.", TITLE = "RLHF-V: Towards Trustworthy MLLMs via Behavior Alignment from Fine-Grained Correctional Human Feedback", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13807-13816", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230189"} @inproceedings{bb235215, AUTHOR = "Xuan, S.Y. and Guo, Q. and Yang, M. and Zhang, S.L.", TITLE = "Pink: Unveiling the Power of Referential Comprehension for Multi-modal LLMs", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13838-13848", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230190"} @inproceedings{bb235216, AUTHOR = "Yu, Q. and Sun, Q. and Zhang, X.S. and Cui, Y.F. and Zhang, F. and Cao, Y. and Wang, X.L. and Liu, J.J.", TITLE = "CapsFusion: Rethinking Image-Text Data at Scale", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "14022-14032", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230191"} @inproceedings{bb235217, AUTHOR = "Yao, J.W. and Qian, Q. and Hu, J.", TITLE = "Multi-Modal Proxy Learning Towards Personalized Visual Multiple Clustering", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "14066-14075", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230192"} @inproceedings{bb235218, AUTHOR = "Zou, B. and Yang, C. and Qiao, Y. and Quan, C.B. and Zhao, Y.J.", TITLE = "LLaMA-Excitor: General Instruction Tuning via Indirect Feature Interaction", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "14089-14099", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230193"} @inproceedings{bb235219, AUTHOR = "Hong, W. and Wang, W.H. and Lv, Q.S. and Xu, J.Z. and Yu, W. and Ji, J.H. and Wang, Y. and Wang, Z. and Dong, Y.X. and Ding, M. 
and Tang, J.", TITLE = "CogAgent: A Visual Language Model for GUI Agents", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "14281-14290", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230194"} @inproceedings{bb235220, AUTHOR = "Mitra, C. and Huang, B. and Darrell, T.J. and Herzig, R.", TITLE = "Compositional Chain-of-Thought Prompting for Large Multimodal Models", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "14420-14431", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230195"} @inproceedings{bb235221, AUTHOR = "Liu, C. and Yin, K. and Cao, H.Y. and Jiang, X.H. and Li, X. and Liu, Y. and Jiang, D.Q. and Sun, X. and Xu, L.", TITLE = "HRVDA: High-Resolution Visual Document Assistant", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "15534-15545", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230196"} @inproceedings{bb235222, AUTHOR = "Luo, C. and Shen, Y.F. and Zhu, Z.Q. and Zheng, Q. and Yu, Z. and Yao, C.", TITLE = "LayoutLLM: Layout Instruction Tuning with Large Language Models for Document Understanding", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "15630-15640", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230197"} @inproceedings{bb235223, AUTHOR = "Yang, Y. and Sun, F.Y. and Weihs, L. and Vanderbilt, E. and Herrasti, A. and Han, W. and Wu, J.J. and Haber, N. and Krishna, R. and Liu, L.J. and Callison Burch, C. and Yatskar, M. and Kembhavi, A. and Clark, C.", TITLE = "Holodeck: Language Guided Generation of 3D Embodied AI Environments", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "16277-16287", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230198"} @inproceedings{bb235224, AUTHOR = "Qin, Y.R. and Zhou, E. and Liu, Q. and Yin, Z.F. and Sheng, L. and Zhang, R.M. and Qiao, Y. and Shao, J.", TITLE = "MP5: A Multi-modal Open-ended Embodied System in Minecraft via Active Perception", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "16307-16316", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230199"} @inproceedings{bb235225, AUTHOR = "Zhang, S. and Yu, X.Y. and Song, X.H. and Wang, X.H. and Jiang, S.Q.", TITLE = "Imagine Before Go: Self-Supervised Generative Map for Object Goal Navigation", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "16414-16425", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230200"} @inproceedings{bb235226, AUTHOR = "Li, H. and Yang, X. and Wang, Z.K. and Zhu, X.Z. and Zhou, J. and Qiao, Y. and Wang, X.G. and Li, H.S. and Lu, L.W. and Dai, J.F.", TITLE = "Auto MC-Reward: Automated Dense Reward Design with Large Language Models for Minecraft", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "16426-16435", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230201"} @inproceedings{bb235227, AUTHOR = "Liu, M.X. and Hayes, T.L. and Ricci, E. and Csurka, G. and Volpi, R.", TITLE = "SHiNe: Semantic Hierarchy Nexus for Open-Vocabulary Object Detection", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "16634-16644", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230202"} @inproceedings{bb235228, AUTHOR = "Kim, J. and Cho, E. and Kim, S. and Kim, H.W.J.", TITLE = "Retrieval-Augmented Open-Vocabulary Object Detection", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "17427-17436", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230203"} @inproceedings{bb235229, AUTHOR = "Saha, O. and van Horn, G. 
and Maji, S.", TITLE = "Improved Zero-Shot Classification by Adapting VLMs with Text Descriptions", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "17542-17552", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230204"} @inproceedings{bb235230, AUTHOR = "Toubal, I.E. and Avinash, A. and Alldrin, N.G. and Dlabal, J. and Zhou, W. and Luo, E. and Stretcu, O. and Xiong, H. and Lu, C.T. and Zhou, H. and Krishna, R. and Fuxman, A. and Duerig, T.", TITLE = "Modeling Collaborator: Enabling Subjective Vision Classification with Minimal Human Effort via LLM Tool-Use", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "17553-17563", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230205"} @inproceedings{bb235231, AUTHOR = "Li, X.Q. and Xu, J.Y. and Zhang, M.X. and Liu, J.M. and Shen, Y. and Ponomarenko, I. and Xu, J.H. and Heng, L. and Huang, S.Y. and Zhang, S.H. and Dong, H.", TITLE = "Object-Centric Prompt-Driven Vision-Language-Action Model for Robotic Manipulation", BOOKTITLE = CVPR25, YEAR = "2025", PAGES = "27638-27648", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230206"} @inproceedings{bb235232, AUTHOR = "Li, X.Q. and Zhang, M.X. and Geng, Y.R. and Geng, H.R. and Long, Y.X. and Shen, Y. and Zhang, R.R. and Liu, J.M. and Dong, H.", TITLE = "ManipLLM: Embodied Multimodal Large Language Model for Object-Centric Robotic Manipulation", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "18061-18070", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230207"} @inproceedings{bb235233, AUTHOR = "Han, T. and Bain, M. and Nagrani, A. and Varol, G. and Xie, W. and Zisserman, A.", TITLE = "AutoAD III: The Prequel: Back to the Pixels", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "18164-18174", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230208"} @inproceedings{bb235234, AUTHOR = "Song, E. and Chai, W.H. and Wang, G. and Zhang, Y.C. and Zhou, H.Y. and Wu, F. and Chi, H.Z. and Guo, X. and Ye, T. and Zhang, Y.T. and Lu, Y. and Hwang, J.N. and Wang, G.A.", TITLE = "MovieChat: From Dense Token to Sparse Memory for Long Video Understanding", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "18221-18232", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230209"} @inproceedings{bb235235, AUTHOR = "Qu, H.X. and Cai, Y.J. and Liu, J.", TITLE = "LLMs are Good Action Recognizers", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "18395-18406", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230210"} @inproceedings{bb235236, AUTHOR = "Chen, J. and Lv, Z.Y. and Wu, S.W. and Lin, K.Q. and Song, C. and Gao, D.F. and Liu, J.W. and Gao, Z.T. and Mao, D.X. and Shou, M.Z.", TITLE = "VideoLLM-online: Online Video Large Language Model for Streaming Video", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "18407-18418", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230211"} @inproceedings{bb235237, AUTHOR = "Zhu, A. and Ke, Q.H. and Gong, M.M. and Bailey, J.", TITLE = "Part-Aware Unified Representation of Language and Skeleton for Zero-Shot Action Recognition", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "18761-18770", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230212"} @inproceedings{bb235238, AUTHOR = "Chen, T.J. and Yu, H.S. and Yang, Z.G. and Li, Z.C. and Sun, W. 
and Chen, C.", TITLE = "OST: Refining Text Knowledge with Optimal Spatio-Temporal Descriptor for General Video Recognition", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "18888-18898", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230213"} @inproceedings{bb235239, AUTHOR = "Zhao, Q.H. and Dai, Y. and Li, H. and Hu, W. and Zhang, F. and Liu, J.", TITLE = "LTGC: Long-Tail Recognition via Leveraging LLMs-Driven Generated Content", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "19510-19520", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230214"} @inproceedings{bb235240, AUTHOR = "Siddiqui, Y. and Alliegro, A. and Artemov, A. and Tommasi, T. and Sirigatti, D. and Rosov, V. and Dai, A. and Nießner, M.", TITLE = "MeshGPT: Generating Triangle Meshes with Decoder-Only Transformers", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "19615-19625", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230215"} @inproceedings{bb235241, AUTHOR = "Li, Z. and Gao, Z.Y. and Tan, C. and Ren, B. and Yang, L.T. and Li, S.Z.", TITLE = "General Point Model Pretraining with Autoencoding and Autoregressive", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "20954-20964", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230216"} @inproceedings{bb235242, AUTHOR = "Li, K.C. and Wang, Y. and He, Y. and Li, Y.Z. and Wang, Y. and Liu, Y. and Wang, Z. and Xu, J. and Chen, G. and Lou, P. and Wang, L.M. and Qiao, Y.", TITLE = "MVBench: A Comprehensive Multi-modal Video Understanding Benchmark", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "22195-22206", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230217"} @inproceedings{bb235243, AUTHOR = "Taesiri, M.R. and Feng, T.J. and Bezemer, C.P. and Nguyen, A.", TITLE = "GlitchBench: Can Large Multimodal Models Detect Video Game Glitches?", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "22444-22455", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230218"} @inproceedings{bb235244, AUTHOR = "Zhang, R. and Zhang, Y.Z. and Chen, J. and Zhou, Y.F. and Gu, J.X. and Chen, C. and Sun, T.", TITLE = "TRINS: Towards Multimodal Language Models that Can Read", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "22584-22594", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230219"} @inproceedings{bb235245, AUTHOR = "Dunlap, L. and Zhang, Y.H. and Wang, X.H. and Zhong, R.Q. and Darrell, T.J. and Steinhardt, J. and Gonzalez, J.E. and Yeung Levy, S.", TITLE = "Describing Differences in Image Sets with Natural Language", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "24199-24208", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230220"} @inproceedings{bb235246, AUTHOR = "Ishmam, A.M. and Thomas, C.", TITLE = "Semantic Shield: Defending Vision-Language Models Against Backdooring and Poisoning via Fine-Grained Knowledge Alignment", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "24820-24830", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230221"} @inproceedings{bb235247, AUTHOR = "Yang, Y.J. and Zhou, T.Y. and Li, K. and Tao, D.P. and Li, L. and Shen, L. and He, X.D. and Jiang, J. and Shi, Y.H.", TITLE = "Embodied Multi-Modal Agent trained by an LLM from a Parallel TextWorld", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "26265-26275", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230222"} @inproceedings{bb235248, AUTHOR = "Hong, Y. and Zheng, Z. and Chen, P.H. and Wang, Y.F. 
and Li, J. and Gan, C.", TITLE = "MultiPLY: A Multisensory Object-Centric Embodied Large Language Model in 3D World", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "26396-26406", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230223"} @inproceedings{bb235249, AUTHOR = "Zhang, Y. and Dong, Y.P. and Zhang, S.Y. and Min, T.Z. and Su, H. and Zhu, J.", TITLE = "Exploring the Transferability of Visual Prompting for Multimodal Large Language Models", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "26552-26562", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230224"} @inproceedings{bb235250, AUTHOR = "Han, J.M. and Gong, K.X. and Zhang, Y.Y. and Wang, J.Q. and Zhang, K. and Lin, D. and Qiao, Y. and Gao, P. and Yue, X.Y.", TITLE = "OneLLM: One Framework to Align All Modalities with Language", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "26574-26585", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230225"} @inproceedings{bb235251, AUTHOR = "Xie, H.X. and Peng, C.J. and Tseng, Y.W. and Chen, H.J. and Hsu, C.F. and Shuai, H.H. and Cheng, W.H.", TITLE = "EmoVIT: Revolutionizing Emotion Insights with Visual Instruction Tuning", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "26586-26595", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230226"} @inproceedings{bb235252, AUTHOR = "Wang, X.Y. and Zhuang, B. and Wu, Q.", TITLE = "ModaVerse: Efficiently Transforming Modalities with LLMs", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "26596-26606", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230227"} @inproceedings{bb235253, AUTHOR = "Lin, J. and Yin, H.X. and Ping, W. and Molchanov, P. and Shoeybi, M. and Han, S.", TITLE = "VILA: On Pre-training for Visual Language Models", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "26679-26689", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230228"} @inproceedings{bb235254, AUTHOR = "Lyu, Y.H. and Zheng, X. and Zhou, J.Z. and Wang, L.", TITLE = "UniBind: LLM-Augmented Unified and Balanced Representation Space to Bind Them All", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "26742-26752", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230229"} @inproceedings{bb235255, AUTHOR = "Liang, T. and Huang, J. and Kong, M. and Chen, L. and Zhu, Q.", TITLE = "Querying as Prompt: Parameter-Efficient Learning for Multimodal Language Model", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "26845-26855", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230230"} @inproceedings{bb235256, AUTHOR = "Zhu, L. and Wei, F. and Lu, Y.", TITLE = "Beyond Text: Frozen Large Language Models in Visual Signal Comprehension", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "27037-27047", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230231"} @inproceedings{bb235257, AUTHOR = "Pi, R.J. and Yao, L.W. and Gao, J.H. and Zhang, J.P. and Zhang, T.", TITLE = "PerceptionGPT: Effectively Fusing Visual Perception Into LLM", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "27114-27123", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230232"} @inproceedings{bb235258, AUTHOR = "Tai, Y. and Fan, W.C. and Zhang, Z. and Liu, Z.W.", TITLE = "Link-Context Learning for Multimodal LLMs", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "27166-27175", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230233"} @inproceedings{bb235259, AUTHOR = "Tang, Z. and Yang, Z. 
and Khademi, M. and Liu, Y. and Zhu, C.G. and Bansal, M.", TITLE = "CoDi-2: In-Context, Interleaved, and Interactive Any-to-Any Generation", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "27415-27424", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230234"} @inproceedings{bb235260, AUTHOR = "Jain, J. and Yang, J.W. and Shi, H.", TITLE = "VCoder: Versatile Vision Encoders for Multimodal Large Language Models", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "27992-28002", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230235"} @inproceedings{bb235261, AUTHOR = "Yuan, Y.Q. and Li, W. and Liu, J. and Tang, D.Q. and Luo, X.J. and Qin, C. and Zhang, L. and Zhu, J.", TITLE = "Osprey: Pixel Understanding with Visual Instruction Tuning", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "28202-28211", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230236"} @inproceedings{bb235262, AUTHOR = "Zheng, Z.H. and Wei, J. and Hu, X.F. and Zhu, H.D. and Nevatia, R.", TITLE = "Large Language Models are Good Prompt Learners for Low-Shot Image Classification", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "28453-28462", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230237"} @inproceedings{bb235263, AUTHOR = "He, H.Y. and Pan, Z.Z. and Liu, J. and Cai, J.F. and Zhuang, B.", TITLE = "Efficient Stitchable Task Adaptation", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "28555-28565", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230238"} @inproceedings{bb235264, AUTHOR = "Tian, X.Y. and Zou, S. and Yang, Z.Y. and Zhang, J.", TITLE = "ArGue: Attribute-Guided Prompt Tuning for Vision-Language Models", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "28578-28587", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230239"} @inproceedings{bb235265, AUTHOR = "Roberts, J. and Luddecke, T. and Sheikh, R. and Han, K. and Albanie, S.", TITLE = "Charting New Territories: Exploring the Geographic and Geospatial Capabilities of Multimodal LLMs", BOOKTITLE = EarthVision24, YEAR = "2024", PAGES = "554-563", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230240"} @inproceedings{bb235266, AUTHOR = "Barbany, O. and Huang, M. and Zhu, X.L. and Dhua, A.", TITLE = "Leveraging Large Language Models for Multimodal Search", BOOKTITLE = FGVC24, YEAR = "2024", PAGES = "1201-1210", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230241"} @inproceedings{bb235267, AUTHOR = "Lv, J.X. and Huang, Y. and Yan, M. and Huang, J.C. and Liu, J.Z. and Liu, Y.F. and Wen, Y.F. and Chen, X.X. and Chen, S.F.", TITLE = "GPT4Motion: Scripting Physical Motions in Text-to-Video Generation via Blender-Oriented GPT Planning", BOOKTITLE = PBDL24, YEAR = "2024", PAGES = "1430-1440", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230242"} @inproceedings{bb235268, AUTHOR = "Baldassini, F.B. and Shukor, M. and Cord, M. and Soulier, L. and Piwowarski, B.", TITLE = "What Makes Multimodal In-Context Learning Work?", BOOKTITLE = Prompting24, YEAR = "2024", PAGES = "1539-1550", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230243"} @inproceedings{bb235269, AUTHOR = "Wang, J.C. 
and Ke, L.", TITLE = "LLM-Seg: Bridging Image Segmentation and Large Language Model Reasoning", BOOKTITLE = WhatNext24, YEAR = "2024", PAGES = "1765-1774", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230244"} @inproceedings{bb235270, AUTHOR = "Hakim, Z.I.A. and Sarker, N.H. and Singh, R.P. and Paul, B. and Dabouei, A. and Xu, M.", TITLE = "Leveraging Generative Language Models for Weakly Supervised Sentence Component Analysis in Video-Language Joint Learning", BOOKTITLE = MULA24, YEAR = "2024", PAGES = "1975-1985", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230245"} @inproceedings{bb235271, AUTHOR = "Deria, A. and Kumar, K. and Chakraborty, S. and Mahapatra, D. and Roy, S.", TITLE = "InVERGe: Intelligent Visual Encoder for Bridging Modalities in Report Generation", BOOKTITLE = MULA24, YEAR = "2024", PAGES = "2028-2038", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230246"} @inproceedings{bb235272, AUTHOR = "Ma, F.P. and Zhou, Y.Z. and Zhang, Y.Y. and Wu, S.Y. and Zhang, Z. and He, Z.L. and Rao, F.Y. and Sun, X.Y.", TITLE = "Task Navigator: Decomposing Complex Tasks for Multimodal Large Language Models", BOOKTITLE = Reasoning24, YEAR = "2024", PAGES = "2248-2257", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230247"} @inproceedings{bb235273, AUTHOR = "Arefeen, M.A. and Debnath, B. and Uddin, M.Y.S. and Chakradhar, S.", TITLE = "ViTA: An Efficient Video-to-Text Algorithm using VLM for RAG-based Video Analysis System", BOOKTITLE = Reasoning24, YEAR = "2024", PAGES = "2266-2274", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230248"} @inproceedings{bb235274, AUTHOR = "Chen, Y.W. and Chu, S.Y.", TITLE = "Large Language Models in Wargaming: Methodology, Application, and Robustness", BOOKTITLE = AML24, YEAR = "2024", PAGES = "2894-2903", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230249"} @inproceedings{bb235275, AUTHOR = "Lai, Z.X. and Wu, J. and Chen, S. and Zhou, Y.C. and Hovakimyan, N.", TITLE = "Residual-based Language Models are Free Boosters for Biomedical Imaging Tasks", BOOKTITLE = DEF-AI-MIA24, YEAR = "2024", PAGES = "5086-5096", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230250"} @inproceedings{bb235276, AUTHOR = "Verma, A.A. and Saeidi, A. and Hegde, S. and Therala, A. and Bardoliya, F.D. and Machavarapu, N. and Ravindhiran, S.A.K. and Malyala, S. and Chatterjee, A. and Yang, Y.Z. and Baral, C.", TITLE = "Evaluating Multimodal Large Language Models across Distribution Shifts and Augmentations", BOOKTITLE = GenerativeFM24, YEAR = "2024", PAGES = "5314-5324", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230251"} @inproceedings{bb235277, AUTHOR = "Fang, X. and Wang, W.G. and Lv, X.X. and Yan, J.", TITLE = "PCQA: A Strong Baseline for AIGC Quality Assessment Based on Prompt Condition", BOOKTITLE = NTIRE24, YEAR = "2024", PAGES = "6167-6176", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230252"} @inproceedings{bb235278, AUTHOR = "Ye, Z. and Liu, J.X. and Cao, J.J. and Chen, Z.Y. and Xuan, Z.W. and Zhou, M.Y. and Liu, Q. and Qi, G.J.", TITLE = "OpenStory: A Large-Scale Open-Domain Dataset for Subject-Driven Visual Storytelling", BOOKTITLE = VDU24, YEAR = "2024", PAGES = "7953-7962", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230253"} @inproceedings{bb235279, AUTHOR = "Chen, X.Y. and Liu, J. 
and Wang, Y. and Wang, P.P. and Brand, M. and Wang, G.H. and Koike Akino, T.", TITLE = "SuperLoRA: Parameter-Efficient Unified Adaptation for Large Vision Models", BOOKTITLE = ECV24, YEAR = "2024", PAGES = "8050-8055", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230254"} @inproceedings{bb235280, AUTHOR = "Wei, C. and Liu, C.X. and Qiao, S.Y. and Zhang, Z.S. and Yuille, A.L. and Yu, J.H.", TITLE = "De-Diffusion Makes Text a Strong Cross-Modal Interface", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13492-13503", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230255"} @inproceedings{bb235281, AUTHOR = "Chen, Y. and Sikka, K. and Cogswell, M. and Ji, H. and Divakaran, A.", TITLE = "DRESS: Instructing Large Vision-Language Models to Align and Interact with Humans via Natural Language Feedback", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "14239-14250", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230256"} @inproceedings{bb235282, AUTHOR = "Chen, B. and Xu, Z. and Kirmani, S. and Ichter, B. and Sadigh, D. and Guibas, L.J. and Xia, F.", TITLE = "SpatialVLM: Endowing Vision-Language Models with Spatial Reasoning Capabilities", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "14455-14465", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230257"} @inproceedings{bb235283, AUTHOR = "Dorkenwald, M. and Barazani, N. and Snoek, C.G.M. and Asano, Y.M.", TITLE = "PIN: Positional Insert Unlocks Object Localisation Abilities in VLMs", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13548-13558", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230258"} @inproceedings{bb235284, AUTHOR = "Cha, J. and Kang, W. and Mun, J. and Roh, B.", TITLE = "Honeybee: Locality-Enhanced Projector for Multimodal LLM", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13817-13827", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230259"} @inproceedings{bb235285, AUTHOR = "Sun, Z.Y. and Fang, Y. and Wu, T. and Zhang, P. and Zang, Y.H. and Kong, S. and Xiong, Y.J. and Lin, D. and Wang, J.Q.", TITLE = "Alpha-CLIP: A CLIP Model Focusing on Wherever you Want", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13019-13029", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230260"} @inproceedings{bb235286, AUTHOR = "Parashar, S. and Lin, Z.Q. and Liu, T. and Dong, X.J. and Li, Y. and Ramanan, D. and Caverlee, J. and Kong, S.", TITLE = "The Neglected Tails in Vision-Language Models", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "12988-12997", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230261"} @inproceedings{bb235287, AUTHOR = "Luo, Y. and Shi, M. and Khan, M.O. and Afzal, M.M. and Huang, H. and Yuan, S. and Tian, Y. and Song, L. and Kouhana, A. and Elze, T. and Fang, Y. and Wang, M.Y.", TITLE = "FairCLIP: Harnessing Fairness in Vision-Language Learning", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "12289-12301", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230262"} @inproceedings{bb235288, AUTHOR = "Zara, G. and Conti, A. and Roy, S. and Lathuiliere, S. and Rota, P. and Ricci, E.", TITLE = "The Unreasonable Effectiveness of Large Language-Vision Models for Source-free Video Domain Adaptation", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "10273-10283", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230263"} @inproceedings{bb235289, AUTHOR = "Zhao, H.B. and Ni, B.L. and Fan, J.S. 
and Wang, Y.X. and Chen, Y.T. and Meng, G.F. and Zhang, Z.X.", TITLE = "Continual Forgetting for Pre-Trained Vision Models", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "28631-28642", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230264"} @inproceedings{bb235290, AUTHOR = "Zhan, X.Y. and Yang, L.X. and Zhao, Y.F. and Mao, K. and Xu, H.L. and Lin, Z. and Li, K.L. and Lu, C.", TITLE = "OakInk2: A Dataset of Bimanual Hands-Object Manipulation in Complex Task Completion", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "445-456", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230265"} @inproceedings{bb235291, AUTHOR = "Li, Y.C. and Zhao, N. and Xiao, J.B. and Feng, C. and Wang, X. and Chua, T.S.", TITLE = "LASO: Language-Guided Affordance Segmentation on 3D Object", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "14251-14260", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230266"} @inproceedings{bb235292, AUTHOR = "Rotstein, N. and Bensaid, D. and Brody, S. and Ganz, R. and Kimmel, R.", TITLE = "FuseCap: Leveraging Large Language Models for Enriched Fused Image Captions", BOOKTITLE = WACV24, YEAR = "2024", PAGES = "5677-5688", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llm4.html#TT230267"} @article{bb235293, AUTHOR = "Lin, B.Q. and Nie, Y. and Wei, Z.M. and Chen, J.Q. and Ma, S. and Han, J.H. and Xu, H. and Chang, X.J. and Liang, X.D.", TITLE = "NavCoT: Boosting LLM-Based Vision-and-Language Navigation via Learning Disentangled Reasoning", JOURNAL = PAMI, VOLUME = "47", YEAR = "2025", NUMBER = "7", MONTH = "July", PAGES = "5945-5957", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llmdr5.html#TT230268"} @article{bb235294, AUTHOR = "Ding, X.P. and Han, J.H. and Xu, H. and Zhang, W. and Li, X.M.", TITLE = "HiLM-D: Enhancing MLLMs with Multi-scale High-Resolution Details for Autonomous Driving", JOURNAL = IJCV, VOLUME = "133", YEAR = "2025", NUMBER = "8", MONTH = "August", PAGES = "5379-5395", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llmdr5.html#TT230269"} @inproceedings{bb235295, AUTHOR = "Ding, X.P. and Han, J.H. and Xu, H. and Liang, X.D. and Zhang, W. and Li, X.M.", TITLE = "Holistic Autonomous Driving Understanding by Bird's-Eye-View Injected Multi-Modal Large Models", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "13668-13677", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llmdr5.html#TT230270"} @article{bb235296, AUTHOR = "Liu, T.Q. and Qin, Y.J. and Zhang, S.H. and Tao, X.M.", TITLE = "Empowering Corner Case Detection in Autonomous Vehicles With Multimodal Large Language Models", JOURNAL = SPLetters, VOLUME = "32", YEAR = "2025", PAGES = "51-55", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llmdr5.html#TT230271"} @article{bb235297, AUTHOR = "Wu, M.Y. and Yu, F.R. and Liu, P.X.P. and He, Y.", TITLE = "Facilitating Autonomous Driving Tasks With Large Language Models", JOURNAL = IEEE_Int_Sys, VOLUME = "40", YEAR = "2025", NUMBER = "1", MONTH = "January", PAGES = "45-52", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llmdr5.html#TT230272"} @article{bb235298, AUTHOR = "Cao, J.H. and Liu, S. and Wu, C.F. and Li, Y. and Du, S.", TITLE = "ATHENA - Autonomous Vehicle Trajectory Planning Considered Human Action Awareness", JOURNAL = SPLetters, VOLUME = "32", YEAR = "2025", PAGES = "1845-1849", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llmdr5.html#TT230273"} @inproceedings{bb235299, AUTHOR = "Chen, K. and Li, Y.Z. 
and Zhang, W.H. and Liu, Y.X. and Li, P.X. and Gao, R. and Hong, L.Q. and Tian, M. and Zhao, X.H. and Li, Z.G. and Yeung, D.Y. and Lu, H.C. and Jia, X.", TITLE = "Automated Evaluation of Large Vision-Language Models on Self-Driving Corner Cases", BOOKTITLE = WACV25, YEAR = "2025", PAGES = "7817-7826", BIBSOURCE = "http://www.visionbib.com/bibliography/applicat803llmdr5.html#TT230274"}
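The unquoted BOOKTITLE and JOURNAL values in the entries above (CVPR24, CVPR25, ICCV23, WACV24, WACV25, PAMI, IJCV, SPLetters, IEEE_Int_Sys, and the workshop keys EarthVision24, FGVC24, PBDL24, Prompting24, WhatNext24, MULA24, Reasoning24, AML24, DEF-AI-MIA24, GenerativeFM24, NTIRE24, VDU24, ECV24) are BibTeX string macros, so this fragment resolves only if matching @STRING definitions are loaded. The lines below are a minimal, illustrative sketch: the expansion wording is an assumption, and the authoritative definitions live in the visionbib master files. In an actual build the @STRING definitions must precede the entries that use them, for example at the top of this file or in a macro .bib file listed earlier in \bibliography.

@STRING{CVPR24 = "IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024"}
@STRING{CVPR25 = "IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025"}
@STRING{ICCV23 = "IEEE/CVF International Conference on Computer Vision (ICCV), 2023"}
@STRING{WACV24 = "IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2024"}
@STRING{WACV25 = "IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2025"}
@STRING{PAMI = "IEEE Transactions on Pattern Analysis and Machine Intelligence"}
@STRING{IJCV = "International Journal of Computer Vision"}
@STRING{SPLetters = "IEEE Signal Processing Letters"}
@STRING{IEEE_Int_Sys = "IEEE Intelligent Systems"}

The workshop keys (EarthVision24, FGVC24, PBDL24, Prompting24, WhatNext24, MULA24, Reasoning24, AML24, DEF-AI-MIA24, GenerativeFM24, NTIRE24, VDU24, ECV24) would take analogous workshop-proceedings expansions.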