@ARTICLE{iet:/content/journals/10.1049/htl.2016.0103,
  author    = {Ian McLoughlin and Jingjie Li and Yan Song and Hamid R. Sharifzadeh},
  keywords  = {larynx-related dysphonia; statistical speech reconstruction; DNN structure; deep partially supervised neural network; partially supervised training approach; restricted Boltzmann machine arrays; Gaussian mixture models; voice-loss patients},
  language  = {English},
  abstract  = {Statistical speech reconstruction for larynx-related dysphonia has achieved good performance using Gaussian mixture models and, more recently, restricted Boltzmann machine arrays; however, deep neural network (DNN)-based systems have been hampered by the limited amount of training data available from individual voice-loss patients. The authors propose a novel DNN structure that allows a partially supervised training approach on spectral features from smaller data sets, yielding very good results compared with the current state of the art.},
  title     = {Speech reconstruction using a deep partially supervised neural network},
  journal   = {Healthcare Technology Letters},
  volume    = {4},
  number    = {4},
  year      = {2017},
  month     = {August},
  pages     = {129--133},
  publisher = {Institution of Engineering and Technology},
  copyright = {This is an open access article published by the IET under the Creative Commons Attribution-NonCommercial License (http://creativecommons.org/licenses/by-nc/3.0/)},
  url       = {https://digital-library.theiet.org/content/journals/10.1049/htl.2016.0103}
}