@inproceedings{bb151200, AUTHOR = "Lee, S. and Choi, J. and Kim, H.W.J.", TITLE = "Multi-Criteria Token Fusion with One-Step-Ahead Attention for Efficient Vision Transformers", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "15741-15750", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147128"} @inproceedings{bb151201, AUTHOR = "Zhang, S.X. and Liu, H.P. and Lin, S. and He, K.", TITLE = "You Only Need Less Attention at Each Stage in Vision Transformers", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "6057-6066", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147129"} @inproceedings{bb151202, AUTHOR = "Li, L. and Wei, Z. and Dong, P. and Luo, W.H. and Xue, W. and Liu, Q.F. and Guo, Y.", TITLE = "Attnzero: Efficient Attention Discovery for Vision Transformers", BOOKTITLE = ECCV24, YEAR = "2024", PAGES = "V: 20-37", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147130"} @inproceedings{bb151203, AUTHOR = "Bao Long, N.H. and Zhang, C.Y. and Shi, Y.Z. and Hirakawa, T. and Yamashita, T. and Matsui, T. and Fujiyoshi, H.", TITLE = "Debiformer: Vision Transformer with Deformable Agent Bi-level Routing Attention", BOOKTITLE = ACCV24, YEAR = "2024", PAGES = "X: 445-462", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147131"} @inproceedings{bb151204, AUTHOR = "Yang, X. and Yuan, L.Z. and Wilber, K. and Sharma, A. and Gu, X.Y. and Qiao, S.Y. and Debats, S. and Wang, H.S. and Adam, H. and Sirotenko, M. and Chen, L.C.", TITLE = "PolyMaX: General Dense Prediction with Mask Transformer", BOOKTITLE = WACV24, YEAR = "2024", PAGES = "1039-1050", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147132"} @inproceedings{bb151205, AUTHOR = "Nie, X.S. and Chen, X. and Jin, H.Y. and Zhu, Z.H. and Yan, Y.F. 
and Qi, D.L.", TITLE = "Triplet Attention Transformer for Spatiotemporal Predictive Learning", BOOKTITLE = WACV24, YEAR = "2024", PAGES = "7021-7030", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147133"} @inproceedings{bb151206, AUTHOR = "Cai, H. and Li, J. and Hu, M. and Gan, C. and Han, S.", TITLE = "EfficientViT: Lightweight Multi-Scale Attention for High-Resolution Dense Prediction", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "17256-17267", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147134"} @inproceedings{bb151207, AUTHOR = "Ryu, J. and Han, D.Y. and Lim, J.W.", TITLE = "Gramian Attention Heads are Strong yet Efficient Vision Learners", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "5818-5828", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147135"} @inproceedings{bb151208, AUTHOR = "Xu, R.H. and Zhang, H. and Hu, W.Z. and Zhang, S.L. and Wang, X.Y.", TITLE = "ParCNetV2: Oversized Kernel with Enhanced Attention*", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "5729-5739", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147136"} @inproceedings{bb151209, AUTHOR = "Zhao, B.Y. and Yu, Z. and Lan, S.Y. and Cheng, Y. and Anandkumar, A. and Lao, Y.J. and Alvarez, J.M.", TITLE = "Fully Attentional Networks with Self-emerging Token Labeling", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "5562-5572", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147137"} @inproceedings{bb151210, AUTHOR = "Guo, Y. and Stutz, D. and Schiele, B.", TITLE = "Robustifying Token Attention for Vision Transformers", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "17511-17522", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147138"} @inproceedings{bb151211, AUTHOR = "Zhao, Y.P. and Tang, H.D. and Jiang, Y.Y. and A, Y. and Wu, Q. 
and Wang, J.", TITLE = "Parameter-Efficient Vision Transformer with Linear Attention", BOOKTITLE = ICIP23, YEAR = "2023", PAGES = "1275-1279", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147139"} @inproceedings{bb151212, AUTHOR = "Shi, L. and Huang, H.D. and Song, B. and Tan, M. and Zhao, W.Z. and Xia, T. and Ren, P.J.", TITLE = "TAQ: Top-K Attention-Aware Quantization for Vision Transformers", BOOKTITLE = ICIP23, YEAR = "2023", PAGES = "1750-1754", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147140"} @inproceedings{bb151213, AUTHOR = "Baili, N. and Frigui, H.", TITLE = "ADA-VIT: Attention-Guided Data Augmentation for Vision Transformers", BOOKTITLE = ICIP23, YEAR = "2023", PAGES = "385-389", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147141"} @inproceedings{bb151214, AUTHOR = "Ding, M.Y. and Shen, Y. and Fan, L.J. and Chen, Z.F. and Chen, Z. and Luo, P. and Tenenbaum, J. and Gan, C.", TITLE = "Visual Dependency Transformers: Dependency Tree Emerges from Reversed Attention", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "14528-14539", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147142"} @inproceedings{bb151215, AUTHOR = "Song, J.C. and Mou, C. and Wang, S.Q. and Ma, S.W. and Zhang, J.", TITLE = "Optimization-Inspired Cross-Attention Transformer for Compressive Sensing", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "6174-6184", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147143"} @inproceedings{bb151216, AUTHOR = "Hassani, A. and Walton, S. and Li, J.C. and Li, S. and Shi, H.", TITLE = "Neighborhood Attention Transformer", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "6185-6194", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147144"} @inproceedings{bb151217, AUTHOR = "Liu, Z.J. and Yang, X.Y. and Tang, H.T. and Yang, S. 
and Han, S.", TITLE = "FlatFormer: Flattened Window Attention for Efficient Point Cloud Transformer", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "1200-1211", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147145"} @inproceedings{bb151218, AUTHOR = "Pan, X. and Ye, T.Z. and Xia, Z.F. and Song, S. and Huang, G.", TITLE = "Slide-Transformer: Hierarchical Vision Transformer with Local Self-Attention", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "2082-2091", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147146"} @inproceedings{bb151219, AUTHOR = "Zhu, L. and Wang, X.J. and Ke, Z.H. and Zhang, W. and Lau, R.", TITLE = "BiFormer: Vision Transformer with Bi-Level Routing Attention", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "10323-10333", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147147"} @inproceedings{bb151220, AUTHOR = "Long, S. and Zhao, Z. and Pi, J. and Wang, S.S. and Wang, J.D.", TITLE = "Beyond Attentive Tokens: Incorporating Token Importance and Diversity for Efficient Vision Transformers", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "10334-10343", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147148"} @inproceedings{bb151221, AUTHOR = "Liu, X.Y. and Peng, H. and Zheng, N.X. and Yang, Y.Q. and Hu, H. and Yuan, Y.X.", TITLE = "EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "14420-14430", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147149"} @inproceedings{bb151222, AUTHOR = "You, H.R. and Xiong, Y. and Dai, X.L. and Wu, B. and Zhang, P.Z. and Fan, H.Q. and Vajda, P. 
and Lin, Y.Y.C.", TITLE = "Castling-ViT: Compressing Self-Attention via Switching Towards Linear-Angular Attention at Vision Transformer Inference", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "14431-14442", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147150"} @inproceedings{bb151223, AUTHOR = "Grainger, R. and Paniagua, T. and Song, X. and Cuntoor, N. and Lee, M.W. and Wu, T.F.", TITLE = "PaCa-ViT: Learning Patch-to-Cluster Attention in Vision Transformers", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "18568-18578", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147151"} @inproceedings{bb151224, AUTHOR = "Wei, C. and Duke, B. and Jiang, R. and Aarabi, P. and Taylor, G.W. and Shkurti, F.", TITLE = "Sparsifiner: Learning Sparse Instance-Dependent Attention for Efficient Vision Transformers", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "22680-22689", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147152"} @inproceedings{bb151225, AUTHOR = "Bhattacharyya, M. and Chattopadhyay, S. and Nag, S.", TITLE = "DeCAtt: Efficient Vision Transformers with Decorrelated Attention Heads", BOOKTITLE = ECV23, YEAR = "2023", PAGES = "4695-4699", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147153"} @inproceedings{bb151226, AUTHOR = "Zhang, Y. and Chen, D. and Kundu, S. and Li, C.H. and Beerel, P.A.", TITLE = "SAL-ViT: Towards Latency Efficient Private Inference on ViT using Selective Attention Search with a Learnable Softmax Approximation", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "5093-5102", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147154"} @inproceedings{bb151227, AUTHOR = "Yeganeh, Y. and Farshad, A. and Weinberger, P. and Ahmadi, S.A. and Adeli, E. 
and Navab, N.", TITLE = "Transformers Pay Attention to Convolutions Leveraging Emerging Properties of ViTs by Dual Attention-Image Network", BOOKTITLE = CVAMD23, YEAR = "2023", PAGES = "2296-2307", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147155"} @inproceedings{bb151228, AUTHOR = "Zheng, J.H. and Yang, L.Q. and Li, Y. and Yang, K. and Wang, Z.Y. and Zhou, J.", TITLE = "Lightweight Vision Transformer with Spatial and Channel Enhanced Self-Attention", BOOKTITLE = REDLCV23, YEAR = "2023", PAGES = "1484-1488", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147156"} @inproceedings{bb151229, AUTHOR = "Hyeon Woo, N. and Yu Ji, K. and Heo, B. and Han, D.Y. and Oh, S.J. and Oh, T.H.", TITLE = "Scratching Visual Transformer's Back with Uniform Attention", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "5784-5795", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147157"} @inproceedings{bb151230, AUTHOR = "Zhang, H.K. and Hu, W.Z. and Wang, X.Y.", TITLE = "Fcaformer: Forward Cross Attention in Hybrid Vision Transformer", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "6037-6046", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147158"} @inproceedings{bb151231, AUTHOR = "Zeng, W.X. and Li, M. and Xiong, W.J. and Tong, T. and Lu, W.J. and Tan, J. and Wang, R.S. and Huang, R.", TITLE = "MPCViT: Searching for Accurate and Efficient MPC-Friendly Vision Transformer with Heterogeneous Attention", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "5029-5040", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147159"} @inproceedings{bb151232, AUTHOR = "Psomas, B. and Kakogeorgiou, I. and Karantzalos, K. 
and Avrithis, Y.", TITLE = "Keep It SimPool: Who Said Supervised Transformers Suffer from Attention Deficit?", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "5327-5337", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147160"} @inproceedings{bb151233, AUTHOR = "Han, D.C. and Pan, X. and Han, Y.Z. and Song, S. and Huang, G.", TITLE = "FLatten Transformer: Vision Transformer using Focused Linear Attention", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "5938-5948", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147161"} @inproceedings{bb151234, AUTHOR = "Tatsunami, Y. and Taki, M.", TITLE = "RaftMLP: How Much Can Be Done Without Attention and with Less Spatial Locality?", BOOKTITLE = ACCV22, YEAR = "2022", PAGES = "VI:459-475", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147162"} @inproceedings{bb151235, AUTHOR = "Bolya, D. and Fu, C.Y. and Dai, X.L. and Zhang, P.Z. and Hoffman, J.", TITLE = "Hydra Attention: Efficient Attention with Many Heads", BOOKTITLE = CADK22, YEAR = "2022", PAGES = "35-49", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147163"} @inproceedings{bb151236, AUTHOR = "Chen, X.Y. and Hu, Q. and Li, K. and Zhong, C. and Wang, G.H.", TITLE = "Accumulated Trivial Attention Matters in Vision Transformers on Small Datasets", BOOKTITLE = WACV23, YEAR = "2023", PAGES = "3973-3981", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147164"} @inproceedings{bb151237, AUTHOR = "Lan, H. and Wang, X. and Shen, H. and Liang, P. and Wei, X.", TITLE = "Couplformer: Rethinking Vision Transformer with Coupling Attention", BOOKTITLE = WACV23, YEAR = "2023", PAGES = "6464-6473", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147165"} @inproceedings{bb151238, AUTHOR = "Debnath, B. and Po, O. and Chowdhury, F.A. 
and Chakradhar, S.", TITLE = "Cosine Similarity based Few-Shot Video Classifier with Attention-based Aggregation", BOOKTITLE = ICPR22, YEAR = "2022", PAGES = "1273-1279", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147166"} @inproceedings{bb151239, AUTHOR = "Mari, C.R. and Gonzalez, D.V. and Bou Balust, E.", TITLE = "Multi-Scale Transformer-Based Feature Combination for Image Retrieval", BOOKTITLE = ICIP22, YEAR = "2022", PAGES = "3166-3170", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147167"} @inproceedings{bb151240, AUTHOR = "Furukawa, R. and Hotta, K.", TITLE = "Local Embedding for Axial Attention", BOOKTITLE = ICIP22, YEAR = "2022", PAGES = "2586-2590", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147168"} @inproceedings{bb151241, AUTHOR = "Ding, M.Y. and Xiao, B. and Codella, N. and Luo, P. and Wang, J.D. and Yuan, L.", TITLE = "DaViT: Dual Attention Vision Transformers", BOOKTITLE = ECCV22, YEAR = "2022", PAGES = "XXIV:74-92", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147169"} @inproceedings{bb151242, AUTHOR = "Wang, P.C. and Wang, X. and Wang, F. and Lin, M. and Chang, S.N. and Li, H. and Jin, R.", TITLE = "KVT: k-NN Attention for Boosting Vision Transformers", BOOKTITLE = ECCV22, YEAR = "2022", PAGES = "XXIV:285-302", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147170"} @inproceedings{bb151243, AUTHOR = "Rao, Y.M. and Zhao, W.L. and Zhou, J. and Lu, J.W.", TITLE = "AMixer: Adaptive Weight Mixing for Self-Attention Free Vision Transformers", BOOKTITLE = ECCV22, YEAR = "2022", PAGES = "XXI:50-67", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147171"} @inproceedings{bb151244, AUTHOR = "Li, A. and Jiao, J.C. and Li, N. and Qi, W. and Xu, W. 
and Pang, M.", TITLE = "Conmw Transformer: A General Vision Transformer Backbone With Merged-Window Attention", BOOKTITLE = ICIP22, YEAR = "2022", PAGES = "1551-1555", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147172"} @inproceedings{bb151245, AUTHOR = "Zhang, Q.M. and Xu, Y.F. and Zhang, J. and Tao, D.C.", TITLE = "VSA: Learning Varied-Size Window Attention in Vision Transformers", BOOKTITLE = ECCV22, YEAR = "2022", PAGES = "XXV:466-483", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147173"} @inproceedings{bb151246, AUTHOR = "Mallick, R. and Benois Pineau, J. and Zemmari, A.", TITLE = "I Saw: A Self-Attention Weighted Method for Explanation of Visual Transformers", BOOKTITLE = ICIP22, YEAR = "2022", PAGES = "3271-3275", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147174"} @inproceedings{bb151247, AUTHOR = "Song, Z.K. and Yu, J.Q. and Chen, Y.P.P. and Yang, W.", TITLE = "Transformer Tracking with Cyclic Shifting Window Attention", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "8781-8790", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147175"} @inproceedings{bb151248, AUTHOR = "Yang, C.L. and Wang, Y.L. and Zhang, J.M. and Zhang, H. and Wei, Z.J. and Lin, Z. and Yuille, A.L.", TITLE = "Lite Vision Transformer with Enhanced Self-Attention", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "11988-11998", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147176"} @inproceedings{bb151249, AUTHOR = "Xia, Z.F. and Pan, X. and Song, S. and Li, L.E. and Huang, G.", TITLE = "Vision Transformer with Deformable Attention", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "4784-4793", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147177"} @inproceedings{bb151250, AUTHOR = "Yu, T. and Khalitov, R. and Cheng, L. 
and Yang, Z.R.", TITLE = "Paramixer: Parameterizing Mixing Links in Sparse Factors Works Better than Dot-Product Self-Attention", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "681-690", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147178"} @inproceedings{bb151251, AUTHOR = "Cheng, B. and Misra, I. and Schwing, A.G. and Kirillov, A. and Girdhar, R.", TITLE = "Masked-attention Mask Transformer for Universal Image Segmentation", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "1280-1289", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147179"} @inproceedings{bb151252, AUTHOR = "Rangrej, S.B. and Srinidhi, C.L. and Clark, J.J.", TITLE = "Consistency driven Sequential Transformers Attention Model for Partially Observable Scenes", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "2508-2517", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147180"} @inproceedings{bb151253, AUTHOR = "Chen, C.F.R. and Fan, Q.F. and Panda, R.", TITLE = "CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification", BOOKTITLE = ICCV21, YEAR = "2021", PAGES = "347-356", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147181"} @inproceedings{bb151254, AUTHOR = "Chefer, H. and Gur, S. and Wolf, L.B.", TITLE = "Generic Attention-model Explainability for Interpreting Bi-Modal and Encoder-Decoder Transformers", BOOKTITLE = ICCV21, YEAR = "2021", PAGES = "387-396", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147182"} @inproceedings{bb151255, AUTHOR = "Xu, W.J. and Xu, Y.F. and Chang, T. and Tu, Z.W.", TITLE = "Co-Scale Conv-Attentional Image Transformers", BOOKTITLE = ICCV21, YEAR = "2021", PAGES = "9961-9970", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147183"} @inproceedings{bb151256, AUTHOR = "Yang, G.L. and Tang, H. and Ding, M.L. and Sebe, N. 
and Ricci, E.", TITLE = "Transformer-Based Attention Networks for Continuous Pixel-Wise Prediction", BOOKTITLE = ICCV21, YEAR = "2021", PAGES = "16249-16259", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147184"} @inproceedings{bb151257, AUTHOR = "Kim, K. and Wu, B.C. and Dai, X.L. and Zhang, P.Z. and Yan, Z.C. and Vajda, P. and Kim, S.", TITLE = "Rethinking the Self-Attention in Vision Transformers", BOOKTITLE = ECV21, YEAR = "2021", PAGES = "3065-3069", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT147185"} @article{bb151258, AUTHOR = "Yang, J.H. and Li, X.Y. and Zheng, M. and Wang, Z.H. and Zhu, Y.Q. and Guo, X.Q. and Yuan, Y.C. and Chai, Z. and Jiang, S.Q.", TITLE = "MemBridge: Video-Language Pre-Training With Memory-Augmented Inter-Modality Bridge", JOURNAL = IP, VOLUME = "32", YEAR = "2023", PAGES = "4073-4087", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147186"} @article{bb151259, AUTHOR = "Selva, J. and Johansen, A.S. and Escalera, S. and Nasrollahi, K. and Moeslund, T.B. and Clapes, A.", TITLE = "Video Transformers: A Survey", JOURNAL = PAMI, VOLUME = "45", YEAR = "2023", NUMBER = "11", MONTH = "November", PAGES = "12922-12943", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147187"} @article{bb151260, AUTHOR = "Zhang, Z.C. and Chen, Z.D. and Wang, Y.X. and Luo, X. and Xu, X.S.", TITLE = "A vision transformer for fine-grained classification by reducing noise and enhancing discriminative information", JOURNAL = PR, VOLUME = "145", YEAR = "2024", PAGES = "109979", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147188"} @article{bb151261, AUTHOR = "Xian, K. and Peng, J. and Cao, Z.G. and Zhang, J.M. 
and Lin, G.S.", TITLE = "ViTA: Video Transformer Adaptor for Robust Video Depth Estimation", JOURNAL = MultMed, VOLUME = "26", YEAR = "2024", PAGES = "3302-3316", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147189"} @article{bb151262, AUTHOR = "Zhang, J.S. and Gu, L.F. and Lai, Y.K. and Wang, X.Y. and Li, K.", TITLE = "Toward Grouping in Large Scenes With Occlusion-Aware Spatio-Temporal Transformers", JOURNAL = CirSysVideo, VOLUME = "34", YEAR = "2024", NUMBER = "5", MONTH = "May", PAGES = "3919-3929", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147190"} @inproceedings{bb151263, AUTHOR = "Goyal, R. and Fan, W.C. and Siam, M. and Sigal, L.", TITLE = "TAM-VT: Transformation-Aware Multi-Scale Video Transformer for Segmentation and Tracking", BOOKTITLE = WACV25, YEAR = "2025", PAGES = "8336-8345", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147191"} @inproceedings{bb151264, AUTHOR = "Wu, R. and Zhou, F.X. and Yin, Z.W. and Liu, K.J.", TITLE = "Aligning Neuronal Coding of Dynamic Visual Scenes with Foundation Vision Models", BOOKTITLE = ECCV24, YEAR = "2024", PAGES = "LXXXVIII: 238-254", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147192"} @inproceedings{bb151265, AUTHOR = "Lu, Y.W. and Liu, D.F. and Wang, Q.F. and Han, C. and Cui, Y.M. and Cao, Z.W. and Zhang, X.L. and Chen, Y.J.V. and Fan, H.", TITLE = "ProMotion: Prototypes as Motion Learners", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "28109-28119", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147193"} @inproceedings{bb151266, AUTHOR = "Choi, J. and Lee, S. and Chu, J.W. and Choi, M. and Kim, H.W.J.", TITLE = "vid-TLDR: Training Free Token merging for Light-Weight Video Transformer", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "18771-18781", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147194"} @inproceedings{bb151267, AUTHOR = "Kowal, M. 
and Dave, A. and Ambrus, R. and Gaidon, A. and Derpanis, K.G. and Tokmakov, P.", TITLE = "Understanding Video Transformers via Universal Concept Discovery", BOOKTITLE = CVPR24, YEAR = "2024", PAGES = "10946-10956", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147195"} @inproceedings{bb151268, AUTHOR = "Herzig, R. and Abramovich, O. and Ben Avraham, E. and Arbelle, A. and Karlinsky, L. and Shamir, A. and Darrell, T.J. and Globerson, A.", TITLE = "PromptonomyViT: Multi-Task Prompt Learning Improves Video Transformers using Synthetic Scene Data", BOOKTITLE = WACV24, YEAR = "2024", PAGES = "6789-6801", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147196"} @inproceedings{bb151269, AUTHOR = "Li, K.C. and Wang, Y. and Li, Y.Z. and Wang, Y. and He, Y. and Wang, L.M. and Qiao, Y.", TITLE = "Unmasked Teacher: Towards Training-Efficient Video Foundation Models", BOOKTITLE = ICCV23, YEAR = "2023", PAGES = "19891-19903", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147197"} @inproceedings{bb151270, AUTHOR = "Ko, D. and Choi, J. and Choi, H.K. and On, K.W. and Roh, B. and Kim, H.W.J.", TITLE = "MELTR: Meta Loss Transformer for Learning to Fine-tune Video Foundation Models", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "20105-20115", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147198"} @inproceedings{bb151271, AUTHOR = "Piergiovanni, A.J. and Kuo, W.C. and Angelova, A.", TITLE = "Rethinking Video ViTs: Sparse Video Tubes for Joint Image and Video Learning", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "2214-2224", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147199"} @inproceedings{bb151272, AUTHOR = "Park, J. and Lee, J.Y. 
and Sohn, K.H.", TITLE = "Dual-Path Adaptation from Image to Video Transformers", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "2203-2213", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147200"} @inproceedings{bb151273, AUTHOR = "Karim, R. and Zhao, H. and Wildes, R.P. and Siam, M.", TITLE = "MED-VT: Multiscale Encoder-Decoder Video Transformer with Application to Object Segmentation", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "6323-6333", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147201"} @inproceedings{bb151274, AUTHOR = "Yu, L.J. and Cheng, Y. and Sohn, K. and Lezama, J. and Zhang, H. and Chang, H. and Hauptmann, A.G. and Yang, M.H. and Hao, Y. and Essa, I. and Jiang, L.", TITLE = "MAGVIT: Masked Generative Video Transformer", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "10459-10469", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147202"} @inproceedings{bb151275, AUTHOR = "Xing, Z. and Dai, Q. and Hu, H. and Chen, J.J. and Wu, Z.X. and Jiang, Y.G.", TITLE = "SVFormer: Semi-supervised Video Transformer for Action Recognition", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "18816-18826", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147203"} @inproceedings{bb151276, AUTHOR = "Xie, F. and Chu, L. and Li, J.H. and Lu, Y. and Ma, C.", TITLE = "VideoTrack: Learning to Track Objects via Video Transformer", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "22826-22835", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147204"} @inproceedings{bb151277, AUTHOR = "Qiu, Z.W. and Yang, Q.S. and Wang, J. and Feng, H.C. and Han, J.Y. and Ding, E. and Xu, C. and Fu, D.M. 
and Wang, J.D.", TITLE = "PSVT: End-to-End Multi-Person 3D Pose and Shape Estimation with Progressive Video Transformers", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "21254-21263", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147205"} @inproceedings{bb151278, AUTHOR = "Yang, J. and Chen, J.W. and Yanai, K.", TITLE = "Transformer-based Cross-modal Recipe Embeddings with Large Batch Training", BOOKTITLE = MMMod23, YEAR = "2023", PAGES = "II: 471-482", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147206"} @inproceedings{bb151279, AUTHOR = "Li, Y. and Min, K. and Tripathi, S. and Vasconcelos, N.M.", TITLE = "SViTT: Temporal Learning of Sparse Video-Text Transformers", BOOKTITLE = CVPR23, YEAR = "2023", PAGES = "18919-18929", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147207"} @inproceedings{bb151280, AUTHOR = "Huang, K.W. and Chen, G.C.F. and Chang, P.W. and Lin, S.C. and Hsu, C. and Thengane, V. and Lin, J.Y.Y.", TITLE = "Strong Gravitational Lensing Parameter Estimation with Vision Transformer", BOOKTITLE = AI4Space22, YEAR = "2022", PAGES = "143-153", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147208"} @inproceedings{bb151281, AUTHOR = "Zheng, M. and Luo, J.P.", TITLE = "Space-time Video Super-resolution 3d Transformer", BOOKTITLE = MMMod23, YEAR = "2023", PAGES = "II: 374-385", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147209"} @inproceedings{bb151282, AUTHOR = "Ye, X. and Bilodeau, G.A.", TITLE = "VPTR: Efficient Transformers for Video Prediction", BOOKTITLE = ICPR22, YEAR = "2022", PAGES = "3492-3499", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147210"} @inproceedings{bb151283, AUTHOR = "Liang, Y.X. and Zhou, P. and Zimmermann, R. 
and Yan, S.C.", TITLE = "DualFormer: Local-Global Stratified Transformer for Efficient Video Recognition", BOOKTITLE = ECCV22, YEAR = "2022", PAGES = "XXXIV:577-595", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147211"} @inproceedings{bb151284, AUTHOR = "Wang, J. and Yang, X.T. and Li, H.D. and Liu, L. and Wu, Z.X. and Jiang, Y.G.", TITLE = "Efficient Video Transformers with Spatial-Temporal Token Selection", BOOKTITLE = ECCV22, YEAR = "2022", PAGES = "XXXV:69-86", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147212"} @inproceedings{bb151285, AUTHOR = "Yuan, J. and Barmpoutis, P. and Stathaki, T.", TITLE = "Multi-Scale Deformable Transformer Encoder Based Single-Stage Pedestrian Detection", BOOKTITLE = ICIP22, YEAR = "2022", PAGES = "2906-2910", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147213"} @inproceedings{bb151286, AUTHOR = "Yun, H. and Lee, S. and Kim, G.", TITLE = "Panoramic Vision Transformer for Saliency Detection in {$360^{\circ}$} Videos", BOOKTITLE = ECCV22, YEAR = "2022", PAGES = "XXXV:422-439", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147214"} @inproceedings{bb151287, AUTHOR = "Sun, G.X. and Hua, Y. and Hu, G.S. and Robertson, N.", TITLE = "TDViT: Temporal Dilated Video Transformer for Dense Video Tasks", BOOKTITLE = ECCV22, YEAR = "2022", PAGES = "XXXV:285-301", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147215"} @inproceedings{bb151288, AUTHOR = "Wang, Y.H. and Zhang, J.C. and Li, Z.G. and Zeng, X. and Zhang, Z. and Zhang, D. and Long, Y. and Wang, N.", TITLE = "Neural Network-based In-Loop Filter for CLIC 2022", BOOKTITLE = CLIC22, YEAR = "2022", PAGES = "1773-1776", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147216"} @inproceedings{bb151289, AUTHOR = "Chang, H.W. and Zhang, H. and Jiang, L. and Liu, C. 
and Freeman, W.T.", TITLE = "MaskGIT: Masked Generative Image Transformer", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "11305-11315", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147217"} @inproceedings{bb151290, AUTHOR = "Herzig, R. and Ben Avraham, E. and Mangalam, K. and Bar, A. and Chechik, G. and Rohrbach, A. and Darrell, T.J. and Globerson, A.", TITLE = "Object-Region Video Transformers", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "3138-3149", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147218"} @inproceedings{bb151291, AUTHOR = "Wang, R. and Chen, D.D. and Wu, Z.X. and Chen, Y.P. and Dai, X. and Liu, M.C. and Jiang, Y.G. and Zhou, L. and Yuan, L.", TITLE = "BEVT: BERT Pretraining of Video Transformers", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "14713-14723", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147219"} @inproceedings{bb151292, AUTHOR = "Wu, C.Y. and Li, Y. and Mangalam, K. and Fan, H.Q. and Xiong, B. and Malik, J. and Feichtenhofer, C.", TITLE = "MeMViT: Memory-Augmented Multiscale Vision Transformer for Efficient Long-Term Video Recognition", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "13577-13587", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147220"} @inproceedings{bb151293, AUTHOR = "Mangalam, K. and Fan, H.Q. and Li, Y. and Wu, C.Y. and Xiong, B. and Feichtenhofer, C. and Malik, J.", TITLE = "Reversible Vision Transformers", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "10820-10830", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147221"} @inproceedings{bb151294, AUTHOR = "Li, Y. and Wu, C.Y. and Fan, H.Q. and Mangalam, K. and Xiong, B. and Malik, J. 
and Feichtenhofer, C.", TITLE = "MViTv2: Improved Multiscale Vision Transformers for Classification and Detection", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "4794-4804", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147222"} @inproceedings{bb151295, AUTHOR = "Ranasinghe, K. and Naseer, M. and Khan, S. and Khan, F.S. and Ryoo, M.S.", TITLE = "Self-supervised Video Transformer", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "2864-2874", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147223"} @inproceedings{bb151296, AUTHOR = "Yang, S.S. and Wang, X.G. and Li, Y. and Fang, Y.X. and Fang, J. and Liu, W.Y. and Zhao, X. and Shan, Y.", TITLE = "Temporally Efficient Vision Transformer for Video Instance Segmentation", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "2875-2885", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147224"} @inproceedings{bb151297, AUTHOR = "Liu, Z. and Ning, J. and Cao, Y. and Wei, Y.X. and Zhang, Z. and Lin, S. and Hu, H.", TITLE = "Video Swin Transformer", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "3192-3201", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147225"} @inproceedings{bb151298, AUTHOR = "Yan, S. and Xiong, X. and Arnab, A. and Lu, Z.C. and Zhang, M. and Sun, C. and Schmid, C.", TITLE = "Multiview Transformers for Video Recognition", BOOKTITLE = CVPR22, YEAR = "2022", PAGES = "3323-3333", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147226"} @inproceedings{bb151299, AUTHOR = "Shao, R.Z. and Wu, G. and Zhou, Y.M. and Fu, Y. and Fang, L. and Liu, Y.B.", TITLE = "LocalTrans: A Multiscale Local Transformer Network for Cross-Resolution Homography Estimation", BOOKTITLE = ICCV21, YEAR = "2021", PAGES = "14870-14879", BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651vidt3.html#TT147227"}