|
a |
|
b/scripts/tutorials/5-generation_advanced.py |
|
|
1 |
""" |
|
|
2 |
|
|
|
3 |
This script shows how to generate synthetic images with narrowed intensity distributions (e.g. T1-weighted scans) and |
|
|
4 |
at a specific resolution. All the arguments shown here can be used in the training function. |
|
|
5 |
These parameters were not explained in the previous tutorials as they were not used for the training of SynthSeg. |
|
|
6 |
|
|
|
7 |
Specifically, this script generates 5 examples of training data simulating 3mm axial T1 scans, which have been resampled |
|
|
8 |
at 1mm resolution to be segmented. |
|
|
9 |
Contrast-specificity is achieved by now imposing Gaussian priors (instead of uniform) over the GMM parameters. |
|
|
10 |
Resolution-specificity is achieved by first blurring and downsampling to the simulated LR. The data will then be |
|
|
11 |
upsampled back to HR, so that the downstream network is trained to segment at HR. This upsampling step mimics the |
|
|
12 |
process that will happen at test time. |
|
|
13 |
|
|
|
14 |
|
|
|
15 |
|
|
|
16 |
If you use this code, please cite one of the SynthSeg papers: |
|
|
17 |
https://github.com/BBillot/SynthSeg/blob/master/bibtex.bib |
|
|
18 |
|
|
|
19 |
Copyright 2020 Benjamin Billot |
|
|
20 |
|
|
|
21 |
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in |
|
|
22 |
compliance with the License. You may obtain a copy of the License at |
|
|
23 |
https://www.apache.org/licenses/LICENSE-2.0 |
|
|
24 |
Unless required by applicable law or agreed to in writing, software distributed under the License is |
|
|
25 |
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|
|
26 |
implied. See the License for the specific language governing permissions and limitations under the |
|
|
27 |
License. |
|
|
28 |
""" |
|
|
29 |
|
|
|
30 |
|
|
|
31 |
import os |
|
|
32 |
import numpy as np |
|
|
33 |
from ext.lab2im import utils |
|
|
34 |
from SynthSeg.brain_generator import BrainGenerator |
|
|
35 |
|
|
|
36 |
# ----- script-level settings -----
n_examples = 5                       # how many synthetic examples this script produces
result_dir = './outputs_tutorial_5'  # destination folder for the generated volumes

# ----- training label maps and associated label lists -----
path_label_map = '../../data/training_label_maps'                                    # folder of training segmentations
generation_labels = '../../data/labels_classes_priors/generation_labels.npy'         # labels used during synthesis
output_labels = '../../data/labels_classes_priors/synthseg_segmentation_labels.npy'  # labels kept in target maps
n_neutral_labels = 18  # count of non-sided labels listed first in generation_labels
output_shape = 160     # spatial size of the generated examples
|
|
47 |
|
|
|
48 |
|
|
|
49 |
# ---------- GMM sampling parameters ----------

# To make the synthesis contrast-specific, the GMM means and standard deviations are no
# longer drawn from uniform priors: each is now sampled from its own Gaussian prior.
prior_distributions = 'normal'

# Labels are still grouped into classes of similar tissue types, so every label in a
# class shares the same intensity prior.
# Example (continuing the label list of tutorial 1):
#   generation_labels  = [0, 24, 507, 2, 3, 4, 17, 25, 41, 42, 43, 53, 57]
#   generation_classes = [0, 1, 2, 3, 4, 5, 4, 6, 3, 4, 5, 4, 6]
# Note that right/left counterparts of a structure are now assigned to the same class.
generation_classes = '../../data/labels_classes_priors/generation_classes_contrast_specific.npy'

# Hyperparameters of the Gaussian priors placed on the GMM parameters.
# Each prior is itself defined by a mean and a standard deviation, so the numpy array
# pointed to by prior_means has shape (2, K), with K the number of classes declared in
# generation_classes: row 0 holds the means of the Gaussian priors, row 1 their
# standard deviations.
#
# Example (continuing the one above): prior_means = np.array([[0, 30, 80, 110, 95, 40, 70]
#                                                             [0, 10, 50, 15, 10, 15, 30]])
# Labels 3 and 17 both belong to class 4, so their intensities are drawn from a Gaussian
# whose mean is itself sampled from the prior at index 4 of prior_means, i.e. N(95, 10).
# Full correspondence table for this example:
# mean of Gaussian for label 0 drawn from N(0,0)=0
# mean of Gaussian for label 24 drawn from N(30,10)
# mean of Gaussian for label 507 drawn from N(80,50)
# mean of Gaussian for labels 2 and 41 drawn from N(110,15)
# mean of Gaussian for labels 3, 17, 42, 53 drawn from N(95,10)
# mean of Gaussian for labels 4 and 43 drawn from N(40,15)
# mean of Gaussian for labels 25 and 57 drawn from N(70,30)
# These hyperparameters were estimated with SynthSR/estimate_priors.py/build_intensity_stats().
prior_means = '../../data/labels_classes_priors/prior_means_t1.npy'
# Same layout as prior_means, but controls the standard deviations of the GMM.
prior_stds = '../../data/labels_classes_priors/prior_stds_t1.npy'
|
|
82 |
|
|
|
83 |
# ---------- Resolution parameters ----------

# We target one specific acquisition resolution here, so resolution is no longer randomised.
randomise_res = False

# Blurring/downsampling parameters: slice spacing and thickness the synthetic scans should
# mimic. Axes refer to the RAS convention (Right-left / Anterior-posterior /
# Superior-inferior, i.e. sagittal / coronal / axial), to which all provided label maps and
# images are automatically aligned during training.
data_res = np.array([1.0, 1.0, 3.0])   # slice spacing, i.e. resolution to mimic
thickness = np.array([1.0, 1.0, 3.0])  # slice thickness
|
|
94 |
|
|
|
95 |
# ------------------------------------------------------ Generate ------------------------------------------------------

# Instantiate the BrainGenerator once; it is sampled repeatedly in the loop below.
brain_generator = BrainGenerator(labels_dir=path_label_map,
                                 generation_labels=generation_labels,
                                 output_labels=output_labels,
                                 n_neutral_labels=n_neutral_labels,
                                 output_shape=output_shape,
                                 prior_distributions=prior_distributions,
                                 generation_classes=generation_classes,
                                 prior_means=prior_means,
                                 prior_stds=prior_stds,
                                 randomise_res=randomise_res,
                                 data_res=data_res,
                                 thickness=thickness)

# Make sure the output folder exists before saving; robust even if utils.save_volume
# does not create missing parent directories (not verifiable from this file).
os.makedirs(result_dir, exist_ok=True)

for n in range(n_examples):

    # generate a new synthetic image and its corresponding label map
    im, lab = brain_generator.generate_brain()

    # save the image/label pair, reusing the affine and header of the training label maps
    utils.save_volume(im, brain_generator.aff, brain_generator.header,
                      os.path.join(result_dir, 'image_t1_%s.nii.gz' % n))
    utils.save_volume(lab, brain_generator.aff, brain_generator.header,
                      os.path.join(result_dir, 'labels_t1_%s.nii.gz' % n))