Diff of /test_env.py [000000] .. [42b7b1]

"""This script predicts the segmentation results of a given testing dataset using a virtual environment.

This script allows users to enter the directory path to the testing PET images. It also accepts the output
directory in which to save the predicted NIfTI images and the computed quantitative biomarker features. If you choose
to run it using a virtual environment, please activate the environment first.
"""

import os
import sys

from src.LFBNet.utilities import train_valid_paths
from src.LFBNet.preprocessing import preprocessing
from src.run import trainer, parse_argument
from src.LFBNet.utilities.compute_surrogate_features import ComputesTMTVsDmaxFromNii


def main():
    """Predict tumor segmentation results and calculate the associated quantitative metrics on a given testing dataset.

    This function receives the directory path to the testing dataset that contains the PET images. It predicts the
    segmentation results and saves them as .nii files. It then calculates the surrogate metabolic tumor volume (sTMTV)
    and the surrogate dissemination feature (sDmax) and saves them as a csv or xls file.

    Acronyms:
        PET: NIfTI format of [18]F-FDG PET images in SUV units.
        GT: Ground truth mask from the expert, if available.

    [directory_path_to_raw 3D nifti data with SUV values] should have the following structure:

    main_dir:
        -- patient_id_1:
            -- PET
                -- give_name.nii or give_name.nii.gz
            -- GT (if available)
                -- give_name.nii or give_name.nii.gz

        -- patient_id_2:
            -- PET
                -- give_name.nii or give_name.nii.gz
            -- GT (if available)
                -- give_name.nii or give_name.nii.gz

    The function reads the .nii files, resizes, crops, and saves the 3D data; from these data it then generates the
    sagittal and coronal PET MIPs and the ground truth (mask from the expert) if available in the folder.

    It gets the latest trained model weight from the './weight' directory and uses that weight to predict the
    segmentation.

    Returns:
        Saves the segmented images and the computed surrogate biomarker features using the last weight saved in the
        ./weight folder.
    """
    # Parse the input and output directory paths.
    # Path to the parent/main directory; please read readme.md for how to organize your files.
    args = parse_argument.get_parsed_arguments_test_case()
    input_dir = args.input_dir
    preprocessing_data_dir = args.output_dir

    # If the input directory path is not given, use the default "/input".
    if input_dir is None:
        input_dir = "/input"
        if not os.path.exists(input_dir):
            os.mkdir(input_dir)

    # If the output path is not given in the arguments, create "/output".
    if preprocessing_data_dir is None:
        preprocessing_data_dir = "/output"
        if not os.path.exists(preprocessing_data_dir):
            os.mkdir(preprocessing_data_dir)
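    # Note (not in the original script): os.mkdir() will fail if the process cannot create
    # top-level folders such as "/input" or "/output"; passing explicit paths on the command
    # line, or using os.makedirs(path, exist_ok=True), is a more robust alternative.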

    # Preprocessing parameters.
    dataset_name = 'data'
    desired_spacing = [4.0, 4.0, 4.0]

    preprocessing_params = dict(
        data_path=input_dir, data_name=dataset_name, saving_dir=preprocessing_data_dir, save_3D=True,
        output_resolution=[128, 128, 256], desired_spacing=desired_spacing, generate_mip=True
        )

    # Resize, crop, and save the 3D PET (and GT, if available) volumes, and generate the sagittal and coronal MIPs.
    mip_data_dir = preprocessing.read_pet_gt_resize_crop_save_as_3d_andor_mip(**preprocessing_params)

    # Get the list of all patient names from the generated MIP directory.
    patient_id_list = os.listdir(mip_data_dir)
    print('There are %d cases to evaluate \n' % len(patient_id_list))

    # Prediction on the given testing dataset.
    test_params = dict(
        preprocessed_dir=mip_data_dir, data_list=patient_id_list, predicted_dir=preprocessing_data_dir
        )
    network_run = trainer.ModelTesting(**test_params)
    network_run.test()

    print("\n\n Computing the surrogate biomarkers ... \n\n")
    for identifier, data_path in zip(
            ["predicted", "ground_truth"],
            [os.path.join(preprocessing_data_dir, "predicted_data"),
             os.path.join(preprocessing_data_dir, "data_default_MIP_dir")]
            ):
        try:
            csv_file = ComputesTMTVsDmaxFromNii(data_path=data_path, get_identifier=identifier)
            csv_file.compute_and_save_surrogate_features()
        except Exception:
            continue
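    # Note (explanatory comment, not in the original script): the "ground_truth" pass is expected to
    # fail when no GT masks were provided (GT is optional, see the docstring above); such failures are
    # skipped so the run is not aborted.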


if __name__ == '__main__':
    print("\n Running the integrated framework for testing use case... \n\n")
    main()