diff --git a/app/transfer.demo.ipynb b/app/transfer.demo.ipynb new file mode 100644 index 0000000..dea6c98 --- /dev/null +++ b/app/transfer.demo.ipynb @@ -0,0 +1,1025 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "#### How to use this notebook?\n", + "\n", + "```\n", + "conda activate deepFCD\n", + "pip install jupyterlab\n", + "git clone https://github.com/NOEL-MNI/deepFCD\n", + "cd deepFCD/app\n", + "```\n", + "\n", + "- Using the GPU: cuda0\n", + " ```\n", + " THEANO_FLAGS=mode=FAST_RUN,device=cuda0,floatX=float32,dnn.enabled=False jupyter lab --no-browser .\n", + " ```\n", + "\n", + "- Using the GPU: cudaX [replace X with the desired GPU-ID]\n", + " ```\n", + " THEANO_FLAGS=mode=FAST_RUN,device=cudaX,floatX=float32,dnn.enabled=False jupyter lab --no-browser .\n", + " ```\n", + "\n", + "- Using the CPU [not recommended, runtimes can exceed several hours/days depending on the hardware]\n", + " ```\n", + " THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32,dnn.enabled=False jupyter lab --no-browser .\n", + " ```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### resources:\n", + "- https://www.tensorflow.org/guide/keras/transfer_learning\n", + "- https://learnopencv.com/keras-tutorial-fine-tuning-using-pre-trained-models/\n", + "- https://stackoverflow.com/questions/41668813/how-to-add-and-remove-new-layers-in-keras-after-loading-weights\n", + "- https://stackoverflow.com/questions/61550788/is-there-a-way-to-freeze-specific-layers-in-a-keraslayer" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import sys\n", + "import multiprocessing\n", + "from config.experiment import options\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "import time\n", + "import numpy as np\n", + "import setproctitle as spt\n", + "from tqdm import tqdm\n", + "\n", + "os.environ[\"KERAS_BACKEND\"] = \"theano\"\n", + "\n", + "# deepFCD imports\n", + "from models.noel_models_keras import *\n", + "from keras.models import load_model, Model\n", + "from keras.utils.layer_utils import count_params\n", + "from keras import backend as K\n", + "from keras.optimizers import Adadelta, Adam\n", + "from keras import losses\n", + "from keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint, LambdaCallback\n", + "from keras.utils.np_utils import to_categorical\n", + "from keras.utils.io_utils import HDF5Matrix\n", + "from utils.metrics import *\n", + "from utils.base import *\n", + "\n", + "# deepMask imports\n", + "import torch\n", + "from mo_dots import Data\n", + "from deepMask.app.utils.data import *\n", + "from deepMask.app.utils.deepmask import *\n", + "from deepMask.app.utils.image_processing import noelImageProcessor\n", + "import deepMask.app.vnet as vnet" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Directory Organization for Training Data\n", + "```bash\n", + "/data\n", + "├── brain/ \n", + "│ ├── FCD_001_1_flair.nii.gz\n", + "│ ├── FCD_001_1_t1.nii.gz\n", + "│ ├── FCD_002_1_flair.nii.gz\n", + "│ ├── FCD_002_1_t1.nii.gz\n", + "│ ├── FCD_003_1_flair.nii.gz\n", + "│ ├── FCD_003_1_t1.nii.gz\n", + "│ ├── FCD_004_1_flair.nii.gz\n", + "│ ├── FCD_004_1_t1.nii.gz\n", + "│ ├── FCD_005_1_flair.nii.gz\n", + "│ ├── FCD_005_1_t1.nii.gz\n", + "│ ├── FCD_006_1_flair.nii.gz\n", + "│ └── FCD_006_1_t1.nii.gz\n", + "│\n", + "├── lesion_labels/\n", + "│ ├── FCD_001_1_lesion.nii.gz\n", + "│ ├── 
FCD_002_1_lesion.nii.gz\n", + "│ ├── FCD_003_1_lesion.nii.gz\n", + "│ ├── FCD_004_1_lesion.nii.gz\n", + "│ ├── FCD_005_1_lesion.nii.gz\n", + "│ └── FCD_006_1_lesion.nii.gz\n", + "│\n", + "├── hdf5/\n", + "│ └── exp_dropoutMC_FCD_data.h5\n", + "│ \n", + "└── noel_deepFCD_dropoutMC # [deepFCD output images for a single patient listed]\n", + " ├── FCD_001_noel_deepFCD_dropoutMC_prob_mean_0.nii.gz # [mean PROBABILITY image from CNN-1]\n", + " ├── FCD_001_noel_deepFCD_dropoutMC_prob_mean_1.nii.gz # [mean PROBABILITY image from CNN-2]\n", + " ├── FCD_001_noel_deepFCD_dropoutMC_prob_var_0.nii.gz # [UNCERTAINTY (variance) image from CNN-1]\n", + " └── FCD_001_noel_deepFCD_dropoutMC_prob_var_1.nii.gz # [UNCERTAINTY (variance) image from CNN-2]\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Note: The configuration below assumes the T1 and T2 contrasts are in their native space (i.e., not yet aligned to stereotaxic space), and therefore they will be preprocessed before brain extraction and FCD detection.\n", + "#### Preprocessing entails: 1) co-registration of the T1 and T2 contrasts to the MNI template space, and 2) intensity non-uniformity correction" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configuration" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "experiment: noel_deepFCD_dropoutMC\n" + ] + } + ], + "source": [ + "# global configuration\n", + "args = Data()\n", + "args.dir = '/tmp/deepFCD'\n", + "args.brain_masking = False # set to True or any non-zero value to perform brain extraction (skull-stripping), False otherwise\n", + "args.preprocess = True # co-register T1 and T2 contrasts before brain extraction\n", + "args.seed = 666\n", + "\n", + "args.train = ['mcd_131_1', 'mcd_132_1', 'mcd_133_1', 'mcd_134_1']\n", + "args.test = ['mcd_135_1', 'mcd_136_1']\n", + "cwd = os.getcwd()\n", + "\n", + "# deepFCD configuration\n", + "K.set_image_dim_ordering('th')\n", + "K.set_image_data_format('channels_first') # TH dimension ordering in this code\n", + "\n", + "options['parallel_gpu'] = False\n", + "modalities = ['T1', 'FLAIR']\n", + "x_names = options['x_names']\n", + "options[\"y_names\"] = [\"_lesion.nii.gz\"]\n", + "y_names = options[\"y_names\"]\n", + "options['dropout_mc'] = True\n", + "options['batch_size'] = 350000\n", + "options['mini_batch_size'] = 2048\n", + "options['load_checkpoint_1'] = True\n", + "options['load_checkpoint_2'] = True\n", + "\n", + "# trained model weights based on 148 histologically-verified FCD subjects\n", + "options['test_folder'] = args.dir\n", + "options['weight_paths'] = os.path.join(cwd, 'weights')\n", + "options['experiment'] = 'noel_deepFCD_dropoutMC'\n", + "spt.setproctitle(options['experiment'])\n", + "print(\"experiment: {}\".format(options['experiment']))" + ] + }, + 
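{ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Optional: sanity-check the input files\n", + "A small sketch (not part of the original pipeline) to verify that every subject in `args.train`/`args.test` has the expected contrasts and lesion label on disk. The `_t1.nii.gz`/`_flair.nii.gz` suffixes are assumptions based on the directory listing above; substitute the actual `options['x_names']` from `config/experiment.py` if yours differ." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# hedged sanity check: the suffixes are assumptions based on the directory tree above\n", + "for subject in args.train + args.test:\n", + "    for suffix in ['_t1.nii.gz', '_flair.nii.gz']:\n", + "        scan = os.path.join(args.dir, 'data', 'brain', subject + suffix)\n", + "        if not os.path.isfile(scan):\n", + "            print('missing contrast: {}'.format(scan))\n", + "    label = os.path.join(args.dir, 'data', 'lesion_labels', subject + '_lesion.nii.gz')\n", + "    if not os.path.isfile(label):\n", + "        print('missing lesion label: {}'.format(label))" + ] + }, + 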
"=================================================================\n", + "input_2 (InputLayer) (None, 2, 16, 16, 16) 0 \n", + "_________________________________________________________________\n", + "conv3d_4 (Conv3D) (None, 48, 16, 16, 16) 2640 \n", + "_________________________________________________________________\n", + "batch_normalization_3 (Batch (None, 48, 16, 16, 16) 192 \n", + "_________________________________________________________________\n", + "dropout_4 (Dropout) (None, 48, 16, 16, 16) 0 \n", + "_________________________________________________________________\n", + "max_pooling3d_4 (MaxPooling3 (None, 48, 8, 8, 8) 0 \n", + "_________________________________________________________________\n", + "conv3d_5 (Conv3D) (None, 96, 8, 8, 8) 124512 \n", + "_________________________________________________________________\n", + "batch_normalization_4 (Batch (None, 96, 8, 8, 8) 384 \n", + "_________________________________________________________________\n", + "dropout_5 (Dropout) (None, 96, 8, 8, 8) 0 \n", + "_________________________________________________________________\n", + "max_pooling3d_5 (MaxPooling3 (None, 96, 4, 4, 4) 0 \n", + "_________________________________________________________________\n", + "dropout_6 (Dropout) (None, 96, 4, 4, 4) 0 \n", + "_________________________________________________________________\n", + "conv3d_6 (Conv3D) (None, 2, 4, 4, 4) 5186 \n", + "_________________________________________________________________\n", + "max_pooling3d_6 (MaxPooling3 (None, 2, 1, 1, 1) 0 \n", + "_________________________________________________________________\n", + "flatten_2 (Flatten) (None, 2) 0 \n", + "_________________________________________________________________\n", + "activation_2 (Activation) (None, 2) 0 \n", + "=================================================================\n", + "Total params: 132,914\n", + "Trainable params: 132,626\n", + "Non-trainable params: 288\n", + "_________________________________________________________________\n", + "None\n" + ] + } + ], + "source": [ + "# initialize empty model\n", + "model = None\n", + "# initialize the CNN architecture\n", + "model = off_the_shelf_model(options)\n", + "\n", + "load_weights = os.path.join(options['weight_paths'], 'noel_deepFCD_dropoutMC_model_1.h5')\n", + "print(\"loading DNN1, model[0]: {} exists\".format(load_weights)) if os.path.isfile(load_weights) else sys.exit(\"model[0]: {} doesn't exist\".format(load_weights))\n", + "model[0] = load_model(load_weights)\n", + "\n", + "load_weights = os.path.join(options['weight_paths'], 'noel_deepFCD_dropoutMC_model_2.h5')\n", + "print(\"loading DNN2, model[1]: {} exists\".format(load_weights)) if os.path.isfile(load_weights) else sys.exit(\"model[1]: {} doesn't exist\".format(load_weights))\n", + "model[1] = load_model(load_weights)\n", + "print(model[1].summary())" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "trainable layers: [conv3d_1/kernel, conv3d_1/bias, batch_normalization_1/gamma, batch_normalization_1/beta, conv3d_2/kernel, conv3d_2/bias, batch_normalization_2/gamma, batch_normalization_2/beta, conv3d_3/kernel, conv3d_3/bias]\n" + ] + } + ], + "source": [ + "print(\"trainable layers:\", model[0].trainable_weights)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Freeze the weights of selected layers\n", + "#### only 3/5 possible trainable layers for `model[1]` are frozen\n", + "You could do the 
{ + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "conv3d_1\n", + "batch_normalization_1\n", + "conv3d_2\n" + ] + } + ], + "source": [ + "trainable_layers = ['conv3d_1', 'batch_normalization_1', 'conv3d_2', 'batch_normalization_2', 'conv3d_3']\n", + "freeze_layers = ['conv3d_1', 'batch_normalization_1', 'conv3d_2']\n", + "\n", + "model[0].trainable = True\n", + "for layer in model[0].layers:\n", + "    # selecting layer by name\n", + "    for frozen in freeze_layers:\n", + "        if layer.name == frozen:\n", + "            print(frozen)\n", + "            layer.trainable = False\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " input_1 False\n", + " conv3d_1 False\n", + " batch_normalization_1 False\n", + " dropout_1 True\n", + " max_pooling3d_1 True\n", + " conv3d_2 False\n", + " batch_normalization_2 True\n", + " dropout_2 True\n", + " max_pooling3d_2 True\n", + " dropout_3 True\n", + " conv3d_3 True\n", + " max_pooling3d_3 True\n", + " flatten_1 True\n", + " activation_1 True\n" + ] + } + ], + "source": [ + "# Check the trainable status of the individual layers\n", + "for layer in model[0].layers:\n", + "    print('{:>25} {:}'.format(layer.name, layer.trainable))" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Total params: 132,914\n", + "Trainable params: 5,378\n", + "Non-trainable params: 127,536\n" + ] + } + ], + "source": [ + "trainable_count = count_params(model[0].trainable_weights)\n", + "non_trainable_count = count_params(model[0].non_trainable_weights)\n", + "print('Total params: {:,}'.format(trainable_count + non_trainable_count))\n", + "print('Trainable params: {:,}'.format(trainable_count))\n", + "print('Non-trainable params: {:,}'.format(non_trainable_count))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Remove all the layers after the `conv3d_3` layer, and add back the `MaxPooling3D()`, `Flatten()`, and `Activation()` layers\n", + "#### This isn't entirely necessary, but gives you the flexibility to add your own custom or standard layers, as sketched below" + ] + }, + 
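{ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For instance, a (hypothetical) alternative head could swap the `MaxPooling3D`/`Flatten` stack for global average pooling and a small dense classifier. The layer choices below are illustrative assumptions, not part of the original deepFCD architecture; the cell that follows proceeds with the original `MaxPooling3D()`/`Flatten()`/`Activation()` head." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from keras.layers import Dense, GlobalAveragePooling3D\n", + "\n", + "# illustrative only: pool the 96-channel feature maps and attach a dense softmax head\n", + "feat = model[0].get_layer('dropout_3').output  # shape: (None, 96, 4, 4, 4)\n", + "pooled = GlobalAveragePooling3D()(feat)        # shape: (None, 96)\n", + "alt_out = Dense(2, activation='softmax')(pooled)\n", + "alt_model = Model(inputs=model[0].input, outputs=[alt_out])" + ] + }, + 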
{ + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "layer_name = 'conv3d_3'\n", + "# base_model = Model(inputs=model[0].input, outputs=model[0].get_layer(layer_name).output)\n", + "x = MaxPooling3D((4, 4, 4))(model[0].get_layer(layer_name).output)\n", + "x = Flatten()(x)\n", + "# Output\n", + "out = Activation('softmax')(x)\n", + "new_model = Model(inputs=model[0].input, outputs=[out])" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "_________________________________________________________________\n", + "Layer (type) Output Shape Param # \n", + "=================================================================\n", + "input_1 (InputLayer) (None, 2, 16, 16, 16) 0 \n", + "_________________________________________________________________\n", + "conv3d_1 (Conv3D) (None, 48, 16, 16, 16) 2640 \n", + "_________________________________________________________________\n", + "batch_normalization_1 (Batch (None, 48, 16, 16, 16) 192 \n", + "_________________________________________________________________\n", + "dropout_1 (Dropout) (None, 48, 16, 16, 16) 0 \n", + "_________________________________________________________________\n", + "max_pooling3d_1 (MaxPooling3 (None, 48, 8, 8, 8) 0 \n", + "_________________________________________________________________\n", + "conv3d_2 (Conv3D) (None, 96, 8, 8, 8) 124512 \n", + "_________________________________________________________________\n", + "batch_normalization_2 (Batch (None, 96, 8, 8, 8) 384 \n", + "_________________________________________________________________\n", + "dropout_2 (Dropout) (None, 96, 8, 8, 8) 0 \n", + "_________________________________________________________________\n", + "max_pooling3d_2 (MaxPooling3 (None, 96, 4, 4, 4) 0 \n", + "_________________________________________________________________\n", + "dropout_3 (Dropout) (None, 96, 4, 4, 4) 0 \n", + "_________________________________________________________________\n", + "conv3d_3 (Conv3D) (None, 2, 4, 4, 4) 5186 \n", + "_________________________________________________________________\n", + "max_pooling3d_7 (MaxPooling3 (None, 2, 1, 1, 1) 0 \n", + "_________________________________________________________________\n", + "flatten_3 (Flatten) (None, 2) 0 \n", + "_________________________________________________________________\n", + "activation_3 (Activation) (None, 2) 0 \n", + "=================================================================\n", + "Total params: 132,914\n", + "Trainable params: 5,378\n", + "Non-trainable params: 127,536\n", + "_________________________________________________________________\n", + "None\n" + ] + } + ], + "source": [ + "print(new_model.summary())\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "new_model.compile(optimizer=Adadelta(), loss=losses.binary_crossentropy, metrics=['accuracy'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Specify your own data to use to train the partially frozen model" + ] + }, + { + "cell_type": "code", + "execution_count": 63, + "metadata": {}, + "outputs": [], + "source": [ + "options[\"main_dir\"] = args.dir\n", + "options[\"model_dir\"] = os.path.join(options[\"main_dir\"], \"models\")\n", + "# save the sampled patches as a HDF5 dataset for faster subsequent experiments\n", + "options[\"save_as_hdf5\"] = True\n", + "options[\"hdf5_data_dir\"] = os.path.join(options[\"main_dir\"], \"data\", \"hdf5\")\n", + "\n", + 
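"# (optional) lesion-detection performance evaluation is turned off in this demo;\n", + "# 'sensitivity' and 'perf' are only initialized as placeholders below\n", + 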
"options[\"compute_performance\"] = False\n", + "sensitivity = 0\n", + "perf = {}\n", + "\n", + "options[\"train_folder\"] = os.path.join(options[\"main_dir\"], \"data\")\n", + "options[\"test_folder\"] = os.path.join(options[\"main_dir\"], \"data\")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "train = args.train\n", + "\n", + "options[\"load_checkpoint_1\"] = False\n", + "options[\"load_checkpoint_2\"] = False\n", + "# options['continue_training_2'] = True\n", + "# options['initial_epoch_2'] = 69\n", + "\n", + "train_list = []\n", + "train_data = {}\n", + "\n", + "for i in train:\n", + " train_list.append(i)\n", + "\n", + "train_data = {\n", + " f: {\n", + " m: os.path.join(options[\"train_folder\"], \"brain\", f + n)\n", + " for m, n in zip(modalities, x_names)\n", + " }\n", + " for f in train_list\n", + "}\n", + "train_labels = {\n", + " f: os.path.join(options[\"train_folder\"], \"lesion_labels\", f + y_names[0])\n", + " for f in train_list\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "({'mcd_131_1': {'T1': '/tmp/deepFCD/data/brain/mcd_131_1_t1.nii.gz',\n", + " 'FLAIR': '/tmp/deepFCD/data/brain/mcd_131_1_flair.nii.gz'},\n", + " 'mcd_132_1': {'T1': '/tmp/deepFCD/data/brain/mcd_132_1_t1.nii.gz',\n", + " 'FLAIR': '/tmp/deepFCD/data/brain/mcd_132_1_flair.nii.gz'},\n", + " 'mcd_133_1': {'T1': '/tmp/deepFCD/data/brain/mcd_133_1_t1.nii.gz',\n", + " 'FLAIR': '/tmp/deepFCD/data/brain/mcd_133_1_flair.nii.gz'},\n", + " 'mcd_134_1': {'T1': '/tmp/deepFCD/data/brain/mcd_134_1_t1.nii.gz',\n", + " 'FLAIR': '/tmp/deepFCD/data/brain/mcd_134_1_flair.nii.gz'},\n", + " 'mcd_135_1': {'T1': '/tmp/deepFCD/data/brain/mcd_135_1_t1.nii.gz',\n", + " 'FLAIR': '/tmp/deepFCD/data/brain/mcd_135_1_flair.nii.gz'}},\n", + " {'mcd_131_1': '/tmp/deepFCD/data/lesion_labels/mcd_131_1_lesion.nii.gz',\n", + " 'mcd_132_1': '/tmp/deepFCD/data/lesion_labels/mcd_132_1_lesion.nii.gz',\n", + " 'mcd_133_1': '/tmp/deepFCD/data/lesion_labels/mcd_133_1_lesion.nii.gz',\n", + " 'mcd_134_1': '/tmp/deepFCD/data/lesion_labels/mcd_134_1_lesion.nii.gz',\n", + " 'mcd_135_1': '/tmp/deepFCD/data/lesion_labels/mcd_135_1_lesion.nii.gz'})" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train_data, train_labels" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load data from individual NIfTI images and extract patches" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "====> # 3D training patches: 34002 \n", + "\n", + "====> # patch size: (16, 16, 16) \n", + "\n", + "====> # modalities: 2 \n", + "\n" + ] + } + ], + "source": [ + "X, labels = load_training_data(train_data, train_labels, options, subcort_masks=None)\n", + "y = to_categorical(labels, num_classes=2)\n", + "\n", + "print( '====> # 3D training patches:', X.shape[0] ,'\\n' )\n", + "print( '====> # patch size:', (X.shape[2],X.shape[3],X.shape[4]) ,'\\n' )\n", + "print( '====> # modalities:', (X.shape[1]) ,'\\n' )" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "options['mini_batch_size'] = 2048\n", + "batch_size = options['mini_batch_size']//2\n", + "\n", + "net_model = 'model_1'\n", + "options['weight_paths'] = options[\"model_dir\"]\n", + "net_weights = 
{ + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "options['mini_batch_size'] = 2048\n", + "batch_size = options['mini_batch_size']//2\n", + "\n", + "net_model = 'model_1'\n", + "options['weight_paths'] = options[\"model_dir\"]\n", + "net_weights = os.path.join(options['weight_paths'], 'checkpoints', net_model + '_weights.h5')\n", + "\n", + "# create the checkpoint and log directories, if they don't exist already\n", + "net_logs = os.path.join(options['weight_paths'], 'logs')\n", + "os.makedirs(os.path.join(options['weight_paths'], 'checkpoints'), exist_ok=True)\n", + "os.makedirs(net_logs, exist_ok=True)\n", + "\n", + "RAND = time.strftime('%a_%H_%M_%S')\n", + "early_stopping_monitor, model_checkpoint, csv_logger, json_logging_callback = model_callbacks(net_weights, net_model, net_logs, options, RAND)" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "====> DNN1 // fitting model \n", + "\n", + "Train on 25501 samples, validate on 8501 samples\n", + "Epoch 1/30\n", + " - 4s - loss: 3.4262e-04 - acc: 0.9999 - val_loss: 6.9768e-05 - val_acc: 1.0000\n", + "Epoch 2/30\n", + " - 4s - loss: 8.1720e-04 - acc: 0.9997 - val_loss: 9.8393e-05 - val_acc: 1.0000\n", + "Epoch 3/30\n", + " - 4s - loss: 2.8628e-04 - acc: 1.0000 - val_loss: 1.1803e-04 - val_acc: 1.0000\n", + "Epoch 4/30\n", + " - 4s - loss: 4.3838e-04 - acc: 0.9998 - val_loss: 1.5212e-04 - val_acc: 0.9999\n", + "Epoch 5/30\n", + " - 4s - loss: 3.4361e-04 - acc: 0.9999 - val_loss: 1.4174e-04 - val_acc: 1.0000\n", + "Epoch 6/30\n", + " - 4s - loss: 3.9617e-04 - acc: 0.9998 - val_loss: 6.1269e-05 - val_acc: 1.0000\n", + "Epoch 7/30\n", + " - 4s - loss: 3.3724e-04 - acc: 0.9998 - val_loss: 6.0597e-05 - val_acc: 1.0000\n", + "Epoch 8/30\n", + " - 4s - loss: 3.3114e-04 - acc: 0.9998 - val_loss: 1.3225e-04 - val_acc: 1.0000\n", + "Epoch 9/30\n", + " - 4s - loss: 3.9629e-04 - acc: 0.9998 - val_loss: 2.1273e-04 - val_acc: 0.9999\n", + "Epoch 10/30\n", + " - 4s - loss: 3.9247e-04 - acc: 0.9999 - val_loss: 2.9889e-04 - val_acc: 0.9999\n", + "Epoch 11/30\n", + " - 4s - loss: 1.3790e-04 - acc: 1.0000 - val_loss: 3.1026e-04 - val_acc: 0.9999\n", + "Epoch 12/30\n", + " - 4s - loss: 3.3594e-04 - acc: 0.9999 - val_loss: 2.8004e-04 - val_acc: 0.9999\n", + "Epoch 13/30\n", + " - 4s - loss: 3.7390e-04 - acc: 0.9998 - val_loss: 2.6697e-04 - val_acc: 0.9999\n", + "Epoch 14/30\n", + " - 4s - loss: 2.9064e-04 - acc: 0.9999 - val_loss: 3.6187e-04 - val_acc: 0.9999\n", + "Epoch 15/30\n", + " - 4s - loss: 2.8135e-04 - acc: 0.9998 - val_loss: 2.6578e-04 - val_acc: 0.9999\n", + "Epoch 16/30\n", + " - 4s - loss: 1.3952e-04 - acc: 1.0000 - val_loss: 1.3757e-04 - val_acc: 1.0000\n", + "Epoch 17/30\n", + " - 4s - loss: 1.5295e-04 - acc: 1.0000 - val_loss: 7.6673e-05 - val_acc: 1.0000\n", + "Epoch 18/30\n", + " - 4s - loss: 4.9732e-04 - acc: 0.9998 - val_loss: 1.9993e-04 - val_acc: 0.9999\n", + "Epoch 19/30\n", + " - 4s - loss: 4.1472e-04 - acc: 0.9999 - val_loss: 1.9775e-04 - val_acc: 1.0000\n", + "Epoch 20/30\n", + " - 4s - loss: 3.8020e-04 - acc: 0.9998 - val_loss: 1.0378e-04 - val_acc: 1.0000\n", + "Epoch 21/30\n", + " - 4s - loss: 2.2130e-04 - acc: 1.0000 - val_loss: 9.2387e-05 - val_acc: 1.0000\n", + "Epoch 22/30\n", + " - 4s - loss: 1.5499e-04 - acc: 1.0000 - val_loss: 6.4528e-05 - val_acc: 1.0000\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print('====> DNN1 // fitting model', '\\n')\n", + "options['max_epochs_1'] = options['max_epochs_1']//2\n", + "new_model.fit(\n", + "    X, y, batch_size=batch_size, epochs=options['max_epochs_1'],\n", + "    verbose=2, shuffle=\"batch\", validation_split=options['train_split'], # validation_data=(X_val, 
y_val),\n", + " callbacks=[early_stopping_monitor, model_checkpoint]\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Fine-tune the model with all layers \"unfrozen\", with a much smaller learning rate" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " input_1 True\n", + " conv3d_1 True\n", + " batch_normalization_1 False\n", + " dropout_1 True\n", + " max_pooling3d_1 True\n", + " conv3d_2 True\n", + " batch_normalization_2 False\n", + " dropout_2 True\n", + " max_pooling3d_2 True\n", + " dropout_3 True\n", + " conv3d_3 True\n", + " max_pooling3d_7 True\n", + " flatten_3 True\n", + " activation_3 True\n" + ] + } + ], + "source": [ + "# Unfreeze the base model\n", + "# new_model.trainable = True \n", + "for layer in new_model.layers:\n", + " # selecting layer by name\n", + " if layer.name.startswith('batch_normalization'):\n", + " layer.trainable = False\n", + " else:\n", + " layer.trainable = True\n", + "\n", + "# It's important to recompile your model after you make any changes\n", + "# to the `trainable` attribute of any inner layer, so that your changes\n", + "# are taken into account\n", + "new_model.compile(optimizer=Adam(1e-5), loss=losses.binary_crossentropy, metrics=['accuracy'])\n", + "\n", + "# Check the trainable status of the individual layers\n", + "for layer in new_model.layers:\n", + " print('{:>25} {:}'.format(layer.name, layer.trainable))" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "====> DNN1 // fine-tuning the model \n", + "\n", + "Train on 25501 samples, validate on 8501 samples\n", + "Epoch 1/30\n", + " - 7s - loss: 1.8968e-04 - acc: 1.0000 - val_loss: 7.5791e-05 - val_acc: 1.0000\n", + "Epoch 2/30\n", + " - 7s - loss: 2.8420e-04 - acc: 0.9999 - val_loss: 7.7897e-05 - val_acc: 1.0000\n", + "Epoch 3/30\n", + " - 7s - loss: 3.2759e-04 - acc: 0.9998 - val_loss: 7.7422e-05 - val_acc: 1.0000\n", + "Epoch 4/30\n", + " - 7s - loss: 1.4612e-04 - acc: 1.0000 - val_loss: 7.5621e-05 - val_acc: 1.0000\n", + "Epoch 5/30\n", + " - 7s - loss: 8.4724e-05 - acc: 1.0000 - val_loss: 7.5989e-05 - val_acc: 1.0000\n", + "Epoch 6/30\n", + " - 7s - loss: 1.5421e-04 - acc: 1.0000 - val_loss: 7.7371e-05 - val_acc: 1.0000\n", + "Epoch 7/30\n", + " - 7s - loss: 3.9578e-04 - acc: 0.9998 - val_loss: 7.7833e-05 - val_acc: 1.0000\n", + "Epoch 8/30\n", + " - 7s - loss: 1.4936e-04 - acc: 1.0000 - val_loss: 8.1342e-05 - val_acc: 1.0000\n", + "Epoch 9/30\n", + " - 7s - loss: 1.2544e-04 - acc: 1.0000 - val_loss: 8.5506e-05 - val_acc: 1.0000\n", + "Epoch 10/30\n", + " - 7s - loss: 1.8176e-04 - acc: 1.0000 - val_loss: 8.6907e-05 - val_acc: 1.0000\n", + "Epoch 11/30\n", + " - 7s - loss: 3.1595e-04 - acc: 0.9999 - val_loss: 8.8001e-05 - val_acc: 1.0000\n", + "Epoch 12/30\n", + " - 7s - loss: 2.4715e-04 - acc: 0.9999 - val_loss: 9.1182e-05 - val_acc: 1.0000\n", + "Epoch 13/30\n", + " - 7s - loss: 1.5350e-04 - acc: 1.0000 - val_loss: 8.8197e-05 - val_acc: 1.0000\n", + "Epoch 14/30\n", + " - 7s - loss: 1.2230e-04 - acc: 1.0000 - val_loss: 8.9314e-05 - val_acc: 1.0000\n", + "Epoch 15/30\n", + " - 7s - loss: 4.2130e-04 - acc: 0.9999 - val_loss: 8.7817e-05 - val_acc: 1.0000\n", + "Epoch 16/30\n", + " - 7s - loss: 1.1709e-04 - acc: 1.0000 - val_loss: 8.8120e-05 - val_acc: 1.0000\n", + "Epoch 17/30\n", + " - 7s - loss: 3.1857e-04 - acc: 0.9998 - 
val_loss: 8.1530e-05 - val_acc: 1.0000\n", + "Epoch 18/30\n", + " - 7s - loss: 1.6583e-04 - acc: 1.0000 - val_loss: 7.4969e-05 - val_acc: 1.0000\n", + "Epoch 19/30\n", + " - 7s - loss: 1.2297e-04 - acc: 1.0000 - val_loss: 7.4916e-05 - val_acc: 1.0000\n", + "Epoch 20/30\n", + " - 7s - loss: 2.6551e-04 - acc: 0.9999 - val_loss: 7.3176e-05 - val_acc: 1.0000\n", + "Epoch 21/30\n", + " - 8s - loss: 3.7169e-04 - acc: 0.9998 - val_loss: 7.2829e-05 - val_acc: 1.0000\n", + "Epoch 22/30\n", + " - 7s - loss: 2.8312e-04 - acc: 0.9998 - val_loss: 7.2505e-05 - val_acc: 1.0000\n", + "Epoch 23/30\n", + " - 7s - loss: 4.3607e-04 - acc: 0.9999 - val_loss: 8.1683e-05 - val_acc: 1.0000\n", + "Epoch 24/30\n", + " - 7s - loss: 2.4847e-04 - acc: 0.9998 - val_loss: 8.5767e-05 - val_acc: 1.0000\n", + "Epoch 25/30\n", + " - 8s - loss: 2.2980e-04 - acc: 0.9999 - val_loss: 9.3081e-05 - val_acc: 1.0000\n", + "Epoch 26/30\n", + " - 7s - loss: 3.0892e-04 - acc: 0.9999 - val_loss: 9.4079e-05 - val_acc: 1.0000\n", + "Epoch 27/30\n", + " - 8s - loss: 1.5757e-04 - acc: 1.0000 - val_loss: 8.6999e-05 - val_acc: 1.0000\n", + "Epoch 28/30\n", + " - 7s - loss: 1.4620e-04 - acc: 1.0000 - val_loss: 8.9785e-05 - val_acc: 1.0000\n", + "Epoch 29/30\n", + " - 8s - loss: 1.5903e-04 - acc: 0.9999 - val_loss: 9.1534e-05 - val_acc: 1.0000\n", + "Epoch 30/30\n", + " - 7s - loss: 8.7605e-05 - acc: 1.0000 - val_loss: 9.1985e-05 - val_acc: 1.0000\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 50, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Train end-to-end using your new dataset\n", + "print('====> DNN1 // fine-tuning the model', '\\n')\n", + "options['max_epochs_1'] = options['max_epochs_1']//2\n", + "new_model.fit(\n", + "    X, y, batch_size=batch_size, epochs=options['max_epochs_1'],\n", + "    verbose=2, shuffle=\"batch\", validation_split=options['train_split'],\n", + "    callbacks=[early_stopping_monitor, model_checkpoint]\n", + "    )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load the test data and get predictions from the fine-tuned CNN model" + ] + }, + { + "cell_type": "code", + "execution_count": 64, + "metadata": {}, + "outputs": [], + "source": [ + "test = args.test\n", + "\n", + "test_list = list(test)\n", + "\n", + "test_data = {\n", + "    f: {\n", + "        m: os.path.join(options[\"test_folder\"], \"brain\", f + n)\n", + "        for m, n in zip(modalities, x_names)\n", + "    }\n", + "    for f in test_list\n", + "}\n", + "test_labels = {\n", + "    f: os.path.join(options[\"test_folder\"], \"lesion_labels\", f + y_names[0])\n", + "    for f in test_list\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "({'mcd_135_1': {'T1': '/tmp/deepFCD/data/brain/mcd_135_1_t1.nii.gz',\n", + " 'FLAIR': '/tmp/deepFCD/data/brain/mcd_135_1_flair.nii.gz'},\n", + " 'mcd_136_1': {'T1': '/tmp/deepFCD/data/brain/mcd_136_1_t1.nii.gz',\n", + " 'FLAIR': '/tmp/deepFCD/data/brain/mcd_136_1_flair.nii.gz'}},\n", + " ['mcd_135_1', 'mcd_136_1'])" + ] + }, + "execution_count": 65, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "test_data, test_list" + ] + }, + 
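{ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Optionally, persist the fine-tuned network before running inference, so it can be reloaded later with `load_model()`; the file name below is illustrative." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# illustrative file name; pick any path you like\n", + "new_model.save(os.path.join(options['model_dir'], 'noel_deepFCD_dropoutMC_model_1_finetuned.h5'))" + ] + }, + 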
{ + "cell_type": "code", + "execution_count": 66, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "serving predictions using the trained model: 0%|\u001b[34m \u001b[0m| 0/2 [00:00<?, ?it/s]" + ] + } + ], + "source": [ + "# model_pair is a list of models to be used for inference\n", + "# here we replace model[0] with our new_model\n", + "# in its vanilla form, old_model = [model[0], model[1]]\n", + 
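"# with options['dropout_mc'] = True, test_model runs Monte Carlo dropout sampling and writes\n", + "# the mean probability (*_prob_mean_*.nii.gz) and uncertainty (*_prob_var_*.nii.gz) maps per CNN\n", + 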
"new_models = [new_model, model[1]]\n", + "\n", + "for _, scan in enumerate(tqdm(test_list, desc='serving predictions using the trained model', colour='blue')):\n", + " t_data = {}\n", + " t_data[scan] = test_data[scan]\n", + "\n", + " options['pred_folder'] = os.path.join(options['test_folder'], scan, options['experiment'])\n", + " if not os.path.exists(options['pred_folder']):\n", + " os.makedirs(options['pred_folder'])\n", + "\n", + " pred_mean_fname = os.path.join(options['pred_folder'], scan + '_prob_mean_1.nii.gz')\n", + " pred_var_fname = os.path.join(options['pred_folder'], scan + '_prob_var_1.nii.gz')\n", + "\n", + " if np.logical_and(os.path.isfile(pred_mean_fname), os.path.isfile(pred_var_fname)):\n", + " print(\"prediction for {} already exists\".format(scan))\n", + " continue\n", + "\n", + " options['test_scan'] = scan\n", + "\n", + " start = time.time()\n", + " print('\\n')\n", + " print('-'*70)\n", + " print(\"testing the model for scan: {}\".format(scan))\n", + " print('-'*70)\n", + "\n", + " test_model(new_models, t_data, options)\n", + "\n", + " end = time.time()\n", + " diff = (end - start) // 60\n", + " print(\"-\"*70)\n", + " print(\"time elapsed: ~ {} minutes\".format(diff))\n", + " print(\"-\"*70)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}