@ARTICLE{iet:/content/journals/10.1049/el.2018.7235,
  author    = {Wendong Chen and Haifeng Hu},
  title     = {Joint prominent expression feature regions in auxiliary task learning network for facial expression recognition},
  journal   = {Electronics Letters},
  year      = {2019},
  month     = {January},
  volume    = {55},
  issue     = {1},
  pages     = {22-24},
  publisher = {Institution of Engineering and Technology},
  copyright = {© The Institution of Engineering and Technology},
  ISSN      = {0013-5194},
  language  = {English},
  doi       = {10.1049/el.2018.7235},
  url       = {https://digital-library.theiet.org/content/journals/10.1049/el.2018.7235},
  keywords  = {facial expression recognition;auxiliary task learning network;prominent expression feature regions;main network structure;auxiliary network structure;expression changes;prominent expressions;public facial expression databases},
  abstract  = {The key issue in facial expression recognition (FER) is to concentrate on the prominent feature regions where the expression changes. In this Letter, the authors propose a novel and effective FER framework that joins prominent expression feature regions in an auxiliary task learning network (ATLN). The proposed approach consists of two deep neural networks: the main network, whose inputs are complete face images, and the auxiliary learning network, whose inputs are pre-processed face images containing the prominent expression feature regions. The main network of the ATLN improves its ability to focus on regions where the expression changes prominently by sharing parameters with the auxiliary network. The authors carry out experiments on two public facial expression databases, namely CK+ and MMI, and the experimental results demonstrate the superior performance of the proposed method.}
}