@article{iet:/content/journals/10.1049/iet-cvi.2018.5104,
  author    = {Xiang, Chenchao and Yu, Zhou and Zhu, Suguo and Yu, Jun and Yang, Xiaokang},
  title     = {End-to-End Visual Grounding via Region Proposal Networks and Bilinear Pooling},
  journal   = {IET Computer Vision},
  year      = {2019},
  month     = mar,
  volume    = {13},
  number    = {2},
  pages     = {131--138},
  publisher = {Institution of Engineering and Technology},
  issn      = {1751-9632},
  doi       = {10.1049/iet-cvi.2018.5104},
  url       = {https://digital-library.theiet.org/content/journals/10.1049/iet-cvi.2018.5104},
  language  = {English},
  copyright = {{\copyright} The Institution of Engineering and Technology},
  keywords  = {visual grounding problem; multimodal factorised bilinear pooling model; Flickr-30k Entities dataset; textual query phrase; end-to-end approach; region-based visual features; region proposal networks; ReferItGame dataset; visual features; phrase-based visual grounding; real-world visual grounding datasets; off-the-shelf proposal generation model; RefCOCO dataset; multimodal features; object proposals},
  abstract  = {Phrase-based visual grounding aims to localise the object in the image referred by a textual query phrase. Most existing approaches adopt a two-stage mechanism to address this problem: first, an off-the-shelf proposal generation model is adopted to extract region-based visual features, and then a deep model is designed to score the proposals based on the query phrase and extracted visual features. In contrast to that, the authors design an end-to-end approach to tackle the visual grounding problem in this study. They use a region proposal network to generate object proposals and the corresponding visual features simultaneously, and multi-modal factorised bilinear pooling model to fuse the multi-modal features effectively. After that, two novel losses are posed on top of the multi-modal features to rank and refine the proposals, respectively. To verify the effectiveness of the proposed approach, the authors conduct experiments on three real-world visual grounding datasets, namely Flickr-30k Entities, ReferItGame and RefCOCO. The experimental results demonstrate the significant superiority of the proposed method over the existing state-of-the-arts.},
}