[7c5f70]: / Crawler / crawler.py

Download this file

95 lines (63 with data), 3.0 kB

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import os
from time import time
import datetime
from Crawler.utilities import DataLoader, SaveResults, ProcessAnimal
from Crawler.update_radiomics_lists import update_radiomics_list
def run_crawler():
    """Crawl the 7T DICOM study tree and process every study not yet logged.

    For each study folder the loader yields: read the animal ID and study
    date, skip it if it already appears in the processing log, otherwise
    resave the DICOM volumes as Nifti, save the DICOM header, bias-correct
    the T2 images, segment the tumor with the pretrained CNN model, compute
    radiomics features, and append the study to the log. When the loader is
    exhausted, refresh the radiomics summary spreadsheet and print the total
    runtime.

    Returns:
        None. All results are written under ``<path>/Results``.
    """
    tstart = time()

    # Set up data paths (external drive layout is fixed for this project)
    path = '/media/matt/Seagate Expansion Drive/b7TData_19/b7TData'
    path_regex = '*.dcm'
    log_file = os.path.join(path, 'Results/processing_log.json')
    summary_file = os.path.join(path, 'Results/Summary.xlsx')

    # Ensure the results path exists (exist_ok avoids a check-then-create race)
    out_path = os.path.join(path, 'Results')
    os.makedirs(out_path, exist_ok=True)

    # Pretrained segmentation model
    # model_path = '/media/blkbeauty3/Matt/ML_Sarcoma_Results/2019_01_22_01-33-59_skip_all_contrasts_lr2e-4_400ep/'
    model_path = '/media/matt/Seagate Expansion Drive/MR Data/ML_Results/' \
                 '2019_11_09_14-12-47_cnn_model_3D_3lyr_do_relu_xentropy_skip'

    # Define data management classes
    loader = DataLoader(base_path=path, folder_regex=path_regex, log_file=log_file)
    saver = SaveResults(data_base_path=path, save_folder='Results', log_file=log_file)

    # Run crawler: loop until the loader reports no more study folders
    flag = True
    while flag:
        # Get working folder
        folder_exists = loader.get_folder()
        if folder_exists:  # or if the generator is not exhausted
            # Get animal name, study date, acquisition time, and protocol
            animal_id, study_date = loader.load_study_data()
            # Compare to animals already processed
            already_processed = loader.compare_with_log()
            if not already_processed:
                # Generate save paths
                snames = saver.gen_save_path(animal_id=animal_id, study_date=study_date)
                # Load data
                data = loader.load_dicom()
                # Save Nifti images to path
                saver.resave_image_volumes(X=data)
                saver.save_dicom_header(loader.names[0][0])
                # Set up processing functions for this animal
                process = ProcessAnimal(snames)
                # Make bias-corrected T2 images
                process.bias_correct()
                # Process data - Segmentation
                process.segment_tumor(model_path)
                # Process data - Compute radiomics (return value unused here;
                # the summary is rebuilt from disk by update_radiomics_list)
                process.compute_radiomics(animal_id)
                # Clear working directory, then record this study in the log so
                # it is skipped on the next crawl. NOTE(review): logging only
                # newly processed studies — confirm against SaveResults API.
                saver.clear_working_directory()
                saver.append_to_log(animal_id=animal_id, study_date=study_date)
                # Save the updated log file
                saver.save_log()
        else:
            # Loader exhausted: stop crawling
            flag = False

    # Update radiomics paths
    update_radiomics_list(out_path, summary_file)
    print('\tTotal time (HH:MM:SS): %s\n\n' % (str(datetime.timedelta(seconds=round(time() - tstart)))))
# Script entry point: run the crawler only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    run_crawler()