"""
Examples to show how to estimate the hyperparameters governing the GMM prior distributions.
This is useful in the case where you want to train contrast-specific versions of SynthSeg.
Beware: if you do so, your model will not be able to segment other contrasts at test time!
We do not provide example images and associated label maps, so do not try to run this script directly!

If you use this code, please cite one of the SynthSeg papers:
https://github.com/BBillot/SynthSeg/blob/master/bibtex.bib

Copyright 2020 Benjamin Billot

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under the
License.
"""

from SynthSeg.estimate_priors import build_intensity_stats

# ----------------------------------------------- simple uni-modal case ------------------------------------------------

# paths of the directories containing the images and corresponding label maps
image_dir = '/image_folder/t1'
labels_dir = '/labels_folder'
# list of labels from which we want to estimate the GMM prior distributions
estimation_labels = '../../data/labels_classes_priors/generation_labels.npy'
# path of the folder where the estimated priors will be written
result_dir = './outputs_tutorial_6/t1_priors'

build_intensity_stats(list_image_dir=image_dir,
                      list_labels_dir=labels_dir,
                      estimation_labels=estimation_labels,
                      result_dir=result_dir,
                      rescale=True)
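
# Illustrative follow-up (hedged): the estimated hyperparameters are saved as .npy files in result_dir.
# The file names below are an assumption based on what the SynthSeg training scripts usually expect;
# check your own result_dir to confirm them.
import os
import numpy as np
prior_means = np.load(os.path.join(result_dir, 'prior_means.npy'))  # assumed output name
prior_stds = np.load(os.path.join(result_dir, 'prior_stds.npy'))    # assumed output name
print('estimated prior arrays:', prior_means.shape, prior_stds.shape)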

# ------------------------------------ building Gaussian priors from several labels ------------------------------------

# same as before
image_dir = '/image_folder/t1'
labels_dir = '/labels_folder'
estimation_labels = '../../data/labels_classes_priors/generation_labels.npy'
result_dir = './outputs_tutorial_6/estimated_t1_priors_classes'

# In the previous example, each label value is used to build the priors of a single Gaussian distribution.
# We show here how to build Gaussian priors from intensities associated with several label values. For example, one
# could build the Gaussian prior of white matter from the labels of both the right and the left white matter.
# This is done by specifying a vector which regroups label values into "classes".
# Labels sharing the same class will contribute to the construction of the same Gaussian prior.
estimation_classes = '../../data/labels_classes_priors/generation_classes.npy'
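
# Illustrative sketch of what such a classes vector contains (the values below are assumptions for
# illustration, not the actual content of generation_classes.npy): it has the same length as the
# estimation labels, and labels sharing the same value are pooled into a single Gaussian prior.
# Here, left and right cerebral white matter (FreeSurfer labels 2 and 41) share class 1:
example_estimation_labels = [0, 2, 41]   # background, left white matter, right white matter
example_estimation_classes = [0, 1, 1]   # labels 2 and 41 contribute to the same Gaussian prior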

build_intensity_stats(list_image_dir=image_dir,
                      list_labels_dir=labels_dir,
                      estimation_labels=estimation_labels,
                      estimation_classes=estimation_classes,
                      result_dir=result_dir,
                      rescale=True)

# ---------------------------------------------- simple multi-modal case -----------------------------------------------

# Here we have multi-modal images, where every image contains all the channels.
# The channels must be stored in the same order for all subjects.
image_dir = '/image_folder/multi-modal_t1_t2'
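
# Optional sanity check (illustrative; the file name below is hypothetical): multi-channel images
# are expected to be stored as single volumes with the channels stacked along the last axis.
import nibabel as nib
example_image = nib.load('/image_folder/multi-modal_t1_t2/subject_001.nii.gz')  # hypothetical file
print(example_image.shape)  # the last dimension should equal the number of channels, e.g. (160, 160, 160, 2)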

# same as before
labels_dir = '/labels_folder'
estimation_labels = '../../data/labels_classes_priors/generation_labels.npy'
estimation_classes = '../../data/labels_classes_priors/generation_classes.npy'
result_dir = './outputs_tutorial_6/estimated_priors_multi_modal'

build_intensity_stats(list_image_dir=image_dir,
                      list_labels_dir=labels_dir,
                      estimation_labels=estimation_labels,
                      estimation_classes=estimation_classes,
                      result_dir=result_dir,
                      rescale=True)

# ------------------------------------- multi-modal images with separate channels --------------------------------------

# Here we have multi-modal images, where the different channels are stored in separate directories.
# We provide these different directories as a list.
list_image_dir = ['/image_folder/t1', '/image_folder/t2']
# In this example, we assume that the channels are registered and at the same resolution.
# Therefore we can use the same label maps for all channels.
labels_dir = '/labels_folder'

# same as before
estimation_labels = '../../data/labels_classes_priors/generation_labels.npy'
estimation_classes = '../../data/labels_classes_priors/generation_classes.npy'
result_dir = './outputs_tutorial_6/estimated_priors_multi_modal'

build_intensity_stats(list_image_dir=list_image_dir,
                      list_labels_dir=labels_dir,
                      estimation_labels=estimation_labels,
                      estimation_classes=estimation_classes,
                      result_dir=result_dir,
                      rescale=True)

# ------------------------------------ multi-modal case with unregistered channels -------------------------------------

# Again, we have multi-modal images where the different channels are stored in separate directories.
list_image_dir = ['/image_folder/t1', '/image_folder/t2']
# In this example, we assume that the channels are no longer registered.
# Therefore we cannot use the same label maps for all channels, and must provide label maps for all modalities.
labels_dir = ['/labels_folder/t1', '/labels_folder/t2']

# same as before
estimation_labels = '../../data/labels_classes_priors/generation_labels.npy'
estimation_classes = '../../data/labels_classes_priors/generation_classes.npy'
result_dir = './outputs_tutorial_6/estimated_unregistered_multi_modal'

build_intensity_stats(list_image_dir=list_image_dir,
                      list_labels_dir=labels_dir,
                      estimation_labels=estimation_labels,
                      estimation_classes=estimation_classes,
                      result_dir=result_dir,
                      rescale=True)
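
# Next step (hedged): the priors estimated by these calls are meant to be fed back into the image
# generation / training scripts (see the training tutorial in this folder) as the hyperparameters of
# the GMM sampling distributions. The exact parameter names depend on the SynthSeg version you are
# using, so refer to the training script's documentation.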