Diff of /Notebooks/usage.ipynb [000000] .. [44bf8c]

--- a
+++ b/Notebooks/usage.ipynb
@@ -0,0 +1,370 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Training Example"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Training a model is very simple, follow this example to train your own model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#First import the training tool and the torchio library\n",
+    "import sys\n",
+    "sys.path.append('../Radiology_and_AI')\n",
+    "from training.run_training import run_training\n",
+    "import torchio as tio"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Next define what transforms you want applied to the training data\n",
+    "#Both the training and validation data must have the same normalization and data preparation steps\n",
+    "#Only the training samples should have the augmentations applied\n",
+    "#Any transforms found at https://torchio.readthedocs.io/transforms/transforms.html can be applied\n",
+    "#Keep track of the  normalization and data preparation steps steps performed, you will need to apply the to all data passed into the model into the future\n",
+    "\n",
+    "#These transforms are applied to data before it is used for training the model\n",
+    "training_transform = tio.Compose([\n",
+    "    #Normalization\n",
+    "    tio.ZNormalization(masking_method=tio.ZNormalization.mean), \n",
+    "    \n",
+    "    #Augmentation\n",
+    "    #Play around with different augmentations as you desire, refer to the torchio docs to see how they work\n",
+    "    tio.RandomNoise(p=0.5),\n",
+    "    tio.RandomGamma(log_gamma=(-0.3, 0.3)),\n",
+    "    tio.RandomElasticDeformation(),\n",
+    "    \n",
+    "    #Preparation\n",
+    "    tio.CropOrPad((240, 240, 160)), #Crop/pad the images to a dimension your model can handle, our default unnet model requires the dimensions be multiples of 8\n",
+    "    tio.OneHot(num_classes=5), #Set num_classes to the max segmentation label + 1\n",
+    "    \n",
+    "])\n",
+    "\n",
+    "#These transforms are applied to data before it is used to determined the performance of the model on the validation set\n",
+    "validation_transform = tio.Compose([\n",
+    "    #Normalization\n",
+    "    tio.ZNormalization(masking_method=tio.ZNormalization.mean),\n",
+    "    \n",
+    "    #Preparation\n",
+    "    tio.CropOrPad((240, 240, 160)),        \n",
+    "    tio.OneHot(num_classes=5)    \n",
+    "    \n",
+    "])"
+   ]
+  },
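+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Optional sanity check: the next cell is an illustrative sketch (not part of run_training) that applies validation_transform to a single example and prints the resulting tensor shapes. The folder and file names in it are assumptions based on the BraTS layout described in the training cell below; adjust them to your own data."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Illustrative sketch: apply the validation transform to one example and confirm the output shapes\n",
+    "#The example folder and file names below are assumptions; change them to match your data\n",
+    "example_dir = '../../brats_new/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData/BraTS20_Training_001'\n",
+    "subject = tio.Subject(\n",
+    "    flair=tio.ScalarImage(f'{example_dir}/BraTS20_Training_001_flair.nii.gz'),\n",
+    "    t1=tio.ScalarImage(f'{example_dir}/BraTS20_Training_001_t1.nii.gz'),\n",
+    "    t2=tio.ScalarImage(f'{example_dir}/BraTS20_Training_001_t2.nii.gz'),\n",
+    "    t1ce=tio.ScalarImage(f'{example_dir}/BraTS20_Training_001_t1ce.nii.gz'),\n",
+    "    seg=tio.LabelMap(f'{example_dir}/BraTS20_Training_001_seg.nii.gz'),\n",
+    ")\n",
+    "transformed = validation_transform(subject)\n",
+    "print(transformed['flair'].data.shape) #Expected: torch.Size([1, 240, 240, 160]) after CropOrPad\n",
+    "print(transformed['seg'].data.shape) #Expected: torch.Size([5, 240, 240, 160]) after OneHot(num_classes=5)"
+   ]
+  },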
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#The run training method applies the transforms you set and trains a model based on the parameters set here\n",
+    "run_training(\n",
+    "    #input_data_path must be set to the path to the folder containing the subfolders for each training example.\n",
+    "    #Each subfolder should contain one nii.gz file for each of the imaging series and the segmentation for that example\n",
+    "    #The name of each nii.gz file should be the name of the parent folder followed by the name of the imaging series type or seg if it is the segmentation\n",
+    "    #For example,MICCAI_BraTS2020_TrainingData contains ~300 folders, each corresponding to an input example,\n",
+    "    # one folder BraTS20_Training_001, contains five files: BraTS20_Training_001_flair.nii.gz, BraTS20_Training_001_seg.nii.gz, BraTS20_Training_001_t1.nii.gz , BraTS20_Training_001_t2.nii.gz,and BraTS20_Training_001_t1ce.nii.gz\n",
+    "    input_data_path = '../../brats_new/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData',\n",
+    "    \n",
+    "    #Where you want your trained model to be saved after training is completed\n",
+    "    output_model_path = '../Models/test_train_many_1e-3.pt',\n",
+    "    \n",
+    "    #The transforms you created previously\n",
+    "    training_transform = training_transform,    \n",
+    "    validation_transform = validation_transform,\n",
+    "    \n",
+    "    #The names of the modalities every example in your input data has\n",
+    "    input_channels_list = ['flair','t1','t2','t1ce'],\n",
+    "    \n",
+    "    #Which of the labels in your segmentation you want to train your model to predict\n",
+    "    seg_channels = [1,2,4],\n",
+    "    \n",
+    "    #The name of the type of model you want to train, currently UNet3D is the only available model\n",
+    "    model_type = 'UNet3D',\n",
+    "    \n",
+    "    #The amount of examples per training batch, reduce/increase this based on memory availability\n",
+    "    batch_size = 1,\n",
+    "    \n",
+    "    #The amount of cpus you want to be avaiable for loading the input data into the model\n",
+    "    num_loading_cpus = 1,\n",
+    "    \n",
+    "    #The learning rate of the AdamW optimizer\n",
+    "    learning_rate = 1e-3,\n",
+    "    \n",
+    "    #Whether or not you want to run wandb logging of your run, install wandb to use these parameters\n",
+    "    wandb_logging = False,\n",
+    "    wandb_project_name = None,\n",
+    "    wandb_run_name = None,\n",
+    "    \n",
+    "    #The seed determines how your training and validation data will be randomly split\n",
+    "    #training_split_ratio is the share of your input data you want to use for training the model, the remainder is used for the validation data\n",
+    "    #Keep track of both the seed and ratio used if you want to be able to split your input data the same way in the future\n",
+    "    seed=42,    \n",
+    "    training_split_ratio = 0.9,\n",
+    "    \n",
+    "    #Any parameters which can be applied to a pytorch lightning trainer can also be applied, below is a selection of parameters you can apply\n",
+    "    #Refer to https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-class-api to see the other parameters you could apply\n",
+    "    max_epochs=10,\n",
+    "    amp_backend = 'apex',\n",
+    "    amp_level = 'O1',\n",
+    "    precision=16,\n",
+    "    check_val_every_n_epoch = 1,\n",
+    "    log_every_n_steps=10,      \n",
+    "    val_check_interval= 50,\n",
+    "    progress_bar_refresh_rate=1,      \n",
+    ")"
+   ]
+  },
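+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Optional: a minimal sketch that checks one example folder against the layout run_training expects. The data path and the folder name BraTS20_Training_001 are assumptions taken from the comments above; adjust them to your own data."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Minimal layout check, assuming the BraTS-style folder and file names described above\n",
+    "import os\n",
+    "\n",
+    "data_root = '../../brats_new/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData'\n",
+    "example = 'BraTS20_Training_001' #Hypothetical example folder, change this to one of yours\n",
+    "\n",
+    "#Each example folder should hold one file per modality plus the segmentation,\n",
+    "#all named <folder name>_<modality or seg>.nii.gz\n",
+    "expected = [f'{example}_{suffix}.nii.gz' for suffix in ['flair', 't1', 't2', 't1ce', 'seg']]\n",
+    "missing = [f for f in expected if not os.path.exists(os.path.join(data_root, example, f))]\n",
+    "print('Missing files:', missing if missing else 'none')"
+   ]
+  },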
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Evaluation Example"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### If you want to evaluate your model in the future on a certain test dataset follow the below"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#First import the training tool and the torchio library\n",
+    "import sys\n",
+    "sys.path.append('.../Radiology_and_AI')\n",
+    "from training.run_training import run_eval\n",
+    "import torchio as tio"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Whatever normalization and data preperation steps you performed must also be applied here\n",
+    "#Refer to the above for more info\n",
+    "#These transforms are applied to data before it is used to determined the performance of the model on the validation set\n",
+    "test_transform = tio.Compose([\n",
+    "    #Normalization\n",
+    "    tio.ZNormalization(masking_method=tio.ZNormalization.mean),\n",
+    "    \n",
+    "    #Preparation\n",
+    "    tio.CropOrPad((240, 240, 160)),        \n",
+    "    tio.OneHot(num_classes=5)    \n",
+    "    \n",
+    "])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#The run_eval method evaluates and prints your models performance on a test dataset by averaging the Dice loss per batch\n",
+    "run_eval(\n",
+    "    #The path to the folder containing the data, refer to the training example for more info\n",
+    "    input_data_path= '../../brats_new/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData',\n",
+    "    \n",
+    "    #The path to the saved model weights\n",
+    "    model_path=\"../../randgamma.pt\",\n",
+    "    \n",
+    "    #The transforms you specified above\n",
+    "    validation_transform=validation_transform,   \n",
+    "    \n",
+    "    #The names of the modalities every example in your input data has\n",
+    "    input_channels_list = ['flair','t1','t2','t1ce'],\n",
+    "    #Which of the labels in your segmentation you want to train your model to predict\n",
+    "    seg_channels = [1,2,4],\n",
+    "    #The name of the type of model you want to train, currently UNet3D is the only available model\n",
+    "    model_type = 'UNet3D'\n",
+    "    \n",
+    "    #If set to true, we only return the performance of the model on the example which were not used for training, based on the train_val_split_ration and seed\n",
+    "    #If false we evaluate on all data and ignore seed and training_split_ratio,\n",
+    "    #set to false if input_data_path is set to a dataset you did not use during training\n",
+    "    is_validation_data = True,\n",
+    "    training_split_ratio=0.9,\n",
+    "    seed=42,\n",
+    "    \n",
+    "    #The amount of examples per training batch, reduce/increase this based on memory availability\n",
+    "    batch_size=1,\n",
+    "    #The amount of cpus you want to be avaiable for loading the input data into the model\n",
+    "    num_loading_cpus = 1,   \n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Visualization Example"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Tools for generating gifs, slices, and nifti files from input data and model predictions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#First import the training tool and the torchio library\n",
+    "import sys\n",
+    "sys.path.append('../Radiology_and_AI')\n",
+    "sys.path.append('../../MedicalZooPytorch')\n",
+    "from visuals.run_visualization import gen_visuals\n",
+    "import torchio as tio"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Whatever normalization and data preperation steps you performed must also be applied here\n",
+    "#Refer to the above for more info\n",
+    "#These transforms are applied to data before it is used to determined the performance of the model on the validation set\n",
+    "validation_transform = tio.Compose([\n",
+    "    #Normalization\n",
+    "    tio.ZNormalization(masking_method=tio.ZNormalization.mean),\n",
+    "    \n",
+    "    #Preparation\n",
+    "    tio.CropOrPad((240, 240, 160)),        \n",
+    "    tio.OneHot(num_classes=5)        \n",
+    "])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/cameron/storage/miniconda3/envs/cameronenv/lib/python3.8/site-packages/matplotlib/image.py:446: UserWarning: Warning: converting a masked element to nan.\n",
+      "  dv = np.float64(self.norm.vmax) - np.float64(self.norm.vmin)\n",
+      "/home/cameron/storage/miniconda3/envs/cameronenv/lib/python3.8/site-packages/matplotlib/image.py:453: UserWarning: Warning: converting a masked element to nan.\n",
+      "  a_min = np.float64(newmin)\n",
+      "/home/cameron/storage/miniconda3/envs/cameronenv/lib/python3.8/site-packages/matplotlib/image.py:458: UserWarning: Warning: converting a masked element to nan.\n",
+      "  a_max = np.float64(newmax)\n"
+     ]
+    }
+   ],
+   "source": [
+    "#The gen_visuals method can be used for generating gifs of the inpu\n",
+    "gen_visuals(\n",
+    "    #The path to the folder containing the nifti files for an example\n",
+    "    image_path=\"../../brats_new/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData/BraTS20_Training_010\",\n",
+    "    \n",
+    "    #The transforms applied to the input should the same applied to the validation data during model training\n",
+    "    transforms = validation_transform,\n",
+    "    \n",
+    "    #The path to the model to use for predictions \n",
+    "    model_path =  \"../Models/test_train_many_1e-3.pt\",\n",
+    "    \n",
+    "    #Generate visuals using segmentations generated by the model\n",
+    "    gen_pred = True,\n",
+    "    #Generate visuals using annotated segmentations\n",
+    "    gen_true = True,\n",
+    "    \n",
+    "    #The modalities your input example has\n",
+    "    input_channels_list = ['flair','t1','t2','t1ce'],\n",
+    "    #The labels your segmentation has\n",
+    "    seg_channels = [1,2,4],\n",
+    "\n",
+    "    #Save a gif of the brain in 3D spinning on its vertical axis\n",
+    "    gen_gif = False,\n",
+    "    #Where to output the gif of the brain with segmentations either from the annotated labels or the predicted labels\n",
+    "    true_gif_output_path = \"../../output/true\",\n",
+    "    pred_gif_output_path = \"../../output/pred\",    \n",
+    "    #Which segmentation labels to display in the gif\n",
+    "    seg_channels_to_display_gif = [1,2,4],\n",
+    "    #The angle from the horizontal axis you are looking down on the brain at as it is spinning\n",
+    "    gif_view_angle = 30,\n",
+    "    #How much the brain rotates between images of the gif\n",
+    "    gif_angle_rotation = 20,\n",
+    "    #fig size of the gif images\n",
+    "    fig_size_gif = (50,25),\n",
+    "\n",
+    "    #Save an image of slices of the brain at different views and with segmentations\n",
+    "    gen_slice = True,\n",
+    "    #where to save the generated slice image\n",
+    "    slice_output_path = \"../../output/slices\",\n",
+    "    #Fig size of the slice images\n",
+    "    fig_size_slice = (25,50),\n",
+    "    #Which seg labels to display in the slice, they will be layered in this order on the image\n",
+    "    seg_channels_to_display_slice = [2,4,1],\n",
+    "    #Which slice to display for different views of the brain\n",
+    "    sag_slice = None, #Sagittal\n",
+    "    cor_slice = None, #Coronal\n",
+    "    axi_slice = None, #Axial\n",
+    "    disp_slice_base = True, #WHether or not to display the input image in the background\n",
+    "    slice_title = None, #THe title of the slice images figure\n",
+    "\n",
+    "    gen_nifti = True, #Whether or not to generate nifti files for the input image and the segmentations\n",
+    "    nifti_output_path = \"../../output/nifti\", #WHere to ssave the nifti files\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "cameronenvironment",
+   "language": "python",
+   "name": "cameronenvironment"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}