AssemblyNet.bib
@article{COUPE2020117026,
title = {AssemblyNet: A large ensemble of CNNs for 3D whole brain MRI segmentation},
journal = {NeuroImage},
volume = {219},
pages = {117026},
year = {2020},
issn = {1053-8119},
doi = {10.1016/j.neuroimage.2020.117026},
url = {https://www.sciencedirect.com/science/article/pii/S1053811920305127},
author = {Pierrick Coupé and Boris Mansencal and Michaël Clément and Rémi Giraud and Baudouin {Denis de Senneville} and Vinh-Thong Ta and Vincent Lepetit and José V. Manjon},
abstract = {Whole brain segmentation of fine-grained structures using deep learning (DL) is a very challenging task since the number of anatomical labels is very high compared to the number of available training images. To address this problem, previous DL methods proposed to use a single convolutional neural network (CNN) or a few independent CNNs. In this paper, we present a novel ensemble method based on a large number of CNNs processing different overlapping brain areas. Inspired by parliamentary decision-making systems, we propose a framework called AssemblyNet, made of two “assemblies” of U-Nets. Such a parliamentary system is capable of dealing with complex decisions, unseen problems, and reaching a relevant consensus. AssemblyNet introduces sharing of knowledge among neighboring U-Nets, an “amendment” procedure made by the second assembly at higher resolution to refine the decision taken by the first one, and a final decision obtained by majority voting. During our validation, AssemblyNet showed competitive performance compared to state-of-the-art methods such as U-Net, joint label fusion and SLANT. Moreover, we investigated the scan-rescan consistency and the robustness to disease effects of our method. These experiments demonstrated the reliability of AssemblyNet. Finally, we showed the interest of using semi-supervised learning to improve the performance of our method.}
}
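
To cite AssemblyNet from a LaTeX document, this file can be referenced directly. The following is a minimal sketch, not part of the entry itself; the document class, bibliography style, and surrounding sentence are illustrative assumptions, and it assumes the file is saved as AssemblyNet.bib next to the .tex source.

% Minimal usage sketch (assumptions: article class, plain style,
% AssemblyNet.bib located in the same directory as this .tex file)
\documentclass{article}
\begin{document}
AssemblyNet~\cite{COUPE2020117026} segments the whole brain with a large ensemble of CNNs.
\bibliographystyle{plain}
\bibliography{AssemblyNet}  % pulls the entry from AssemblyNet.bib
\end{document}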