--- /dev/null
+++ b/scripts/InitializationNotebook.ipynb
@@ -0,0 +1,288 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# David Ouyang 12/5/2019\n",
+    "\n",
+    "# Notebook which:\n",
+    "# 1. Downloads weights\n",
+    "# 2. Initializes model and imports weights\n",
+    "# 3. Performs test time evaluation of videos (already preprocessed with ConvertDICOMToAVI.ipynb)\n",
+    "\n",
+    "import re\n",
+    "import os, os.path\n",
+    "from os.path import splitext\n",
+    "import pydicom as dicom\n",
+    "import numpy as np\n",
+    "from pydicom.uid import UID, generate_uid\n",
+    "import shutil\n",
+    "from multiprocessing import dummy as multiprocessing\n",
+    "import time\n",
+    "import subprocess\n",
+    "import datetime\n",
+    "from datetime import date\n",
+    "import sys\n",
+    "import cv2\n",
+    "import matplotlib.pyplot as plt\n",
+    "import sys\n",
+    "from shutil import copy\n",
+    "import math\n",
+    "import torch\n",
+    "import torchvision\n",
+    "\n",
+    "sys.path.append(\"..\")\n",
+    "import echonet\n",
+    "\n",
+    "import wget \n",
+    "\n",
+    "#destinationFolder = \"/Users/davidouyang/Dropbox/Echo Research/CodeBase/Output\"\n",
+    "destinationFolder = \"C:\\\\Users\\\\Windows\\\\Dropbox\\\\Echo Research\\\\CodeBase\\\\Output\"\n",
+    "#videosFolder = \"/Users/davidouyang/Dropbox/Echo Research/CodeBase/a4c-video-dir\"\n",
+    "videosFolder = \"C:\\\\Users\\\\Windows\\\\Dropbox\\\\Echo Research\\\\CodeBase\\\\a4c-video-dir\"\n",
+    "#DestinationForWeights = \"/Users/davidouyang/Dropbox/Echo Research/CodeBase/EchoNetDynamic-Weights\"\n",
+    "DestinationForWeights = \"C:\\\\Users\\\\Windows\\\\Dropbox\\\\Echo Research\\\\CodeBase\\\\EchoNetDynamic-Weights\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "The weights are at C:\\Users\\Windows\\Dropbox\\Echo Research\\CodeBase\\EchoNetDynamic-Weights\n",
+      "Segmentation Weights already present\n",
+      "EF Weights already present\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Download model weights\n",
+    "\n",
+    "if os.path.exists(DestinationForWeights):\n",
+    "    print(\"The weights are at\", DestinationForWeights)\n",
+    "else:\n",
+    "    print(\"Creating folder at \", DestinationForWeights, \" to store weights\")\n",
+    "    os.mkdir(DestinationForWeights)\n",
+    "    \n",
+    "segmentationWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/deeplabv3_resnet50_random.pt'\n",
+    "ejectionFractionWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/r2plus1d_18_32_2_pretrained.pt'\n",
+    "\n",
+    "\n",
+    "if not os.path.exists(os.path.join(DestinationForWeights, os.path.basename(segmentationWeightsURL))):\n",
+    "    print(\"Downloading Segmentation Weights, \", segmentationWeightsURL,\" to \",os.path.join(DestinationForWeights,os.path.basename(segmentationWeightsURL)))\n",
+    "    filename = wget.download(segmentationWeightsURL, out = DestinationForWeights)\n",
+    "else:\n",
+    "    print(\"Segmentation Weights already present\")\n",
+    "    \n",
+    "if not os.path.exists(os.path.join(DestinationForWeights, os.path.basename(ejectionFractionWeightsURL))):\n",
+    "    print(\"Downloading EF Weights, \", ejectionFractionWeightsURL,\" to \",os.path.join(DestinationForWeights,os.path.basename(ejectionFractionWeightsURL)))\n",
+    "    filename = wget.download(ejectionFractionWeightsURL, out = DestinationForWeights)\n",
+    "else:\n",
+    "    print(\"EF Weights already present\")\n",
+    "        \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "loading weights from  C:\\Users\\Windows\\Dropbox\\Echo Research\\CodeBase\\EchoNetDynamic-Weights\\r2plus1d_18_32_2_pretrained\n",
+      "cuda is available, original weights\n",
+      "external_test ['0X1A05DFFFCAFB253B.avi', '0X1A0A263B22CCD966.avi', '0X1A2A76BDB5B98BED.avi', '0X1A2C60147AF9FDAE.avi', '0X1A2E9496910EFF5B.avi', '0X1A3D565B371DC573.avi', '0X1A3E7BF1DFB132FB.avi', '0X1A5FAE3F9D37794E.avi', '0X1A6ACFE7B286DAFC.avi', '0X1A8D85542DBE8204.avi', '23_Apical_4_chamber_view.dcm.avi', '62_Apical_4_chamber_view.dcm.avi', '64_Apical_4_chamber_view.dcm.avi']\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "100%|██████████████████████████████████████████████████████████████████████████████████| 10/10 [00:10<00:00,  1.00s/it]\n",
+      "100%|████████████████████████████████████████████████████████| 13/13 [00:29<00:00,  2.26s/it, 3122.29 (3440.26) / 0.00]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Initialize and Run EF model\n",
+    "\n",
+    "frames = 32\n",
+    "period = 1 #2\n",
+    "batch_size = 20\n",
+    "model = torchvision.models.video.r2plus1d_18(pretrained=False)\n",
+    "model.fc = torch.nn.Linear(model.fc.in_features, 1)\n",
+    "\n",
+    "\n",
+    "\n",
+    "print(\"loading weights from \", os.path.join(DestinationForWeights, \"r2plus1d_18_32_2_pretrained\"))\n",
+    "\n",
+    "if torch.cuda.is_available():\n",
+    "    print(\"cuda is available, original weights\")\n",
+    "    device = torch.device(\"cuda\")\n",
+    "    model = torch.nn.DataParallel(model)\n",
+    "    model.to(device)\n",
+    "    checkpoint = torch.load(os.path.join(DestinationForWeights, os.path.basename(ejectionFractionWeightsURL)))\n",
+    "    model.load_state_dict(checkpoint['state_dict'])\n",
+    "else:\n",
+    "    print(\"cuda is not available, cpu weights\")\n",
+    "    device = torch.device(\"cpu\")\n",
+    "    checkpoint = torch.load(os.path.join(DestinationForWeights, os.path.basename(ejectionFractionWeightsURL)), map_location = \"cpu\")\n",
+    "    state_dict_cpu = {k[7:]: v for (k, v) in checkpoint['state_dict'].items()}\n",
+    "    model.load_state_dict(state_dict_cpu)\n",
+    "\n",
+    "\n",
+    "# try some random weights: final_r2+1d_model_regression_EF_sgd_skip1_32frames.pth.tar\n",
+    "# scp ouyangd@arthur2:~/Echo-Tracing-Analysis/final_r2+1d_model_regression_EF_sgd_skip1_32frames.pth.tar \"C:\\Users\\Windows\\Dropbox\\Echo Research\\CodeBase\\EchoNetDynamic-Weights\"\n",
+    "#Weights = \"final_r2+1d_model_regression_EF_sgd_skip1_32frames.pth.tar\"\n",
+    "\n",
+    "\n",
+    "output = os.path.join(destinationFolder, \"cedars_ef_output.csv\")\n",
+    "\n",
+    "ds = echonet.datasets.Echo(split = \"external_test\", external_test_location = videosFolder, crops=\"all\")\n",
+    "print(ds.split, ds.fnames)\n",
+    "\n",
+    "mean, std = echonet.utils.get_mean_and_std(ds)\n",
+    "\n",
+    "kwargs = {\"target_type\": \"EF\",\n",
+    "          \"mean\": mean,\n",
+    "          \"std\": std,\n",
+    "          \"length\": frames,\n",
+    "          \"period\": period,\n",
+    "          }\n",
+    "\n",
+    "ds = echonet.datasets.Echo(split = \"external_test\", external_test_location = videosFolder, **kwargs, crops=\"all\")\n",
+    "\n",
+    "test_dataloader = torch.utils.data.DataLoader(ds, batch_size = 1, num_workers = 5, shuffle = True, pin_memory=(device.type == \"cuda\"))\n",
+    "loss, yhat, y = echonet.utils.video.run_epoch(model, test_dataloader, \"test\", None, device, save_all=True, blocks=25)\n",
+    "\n",
+    "with open(output, \"w\") as g:\n",
+    "    for (filename, pred) in zip(ds.fnames, yhat):\n",
+    "        for (i,p) in enumerate(pred):\n",
+    "            g.write(\"{},{},{:.4f}\\n\".format(filename, i, p))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Initialize and Run Segmentation model\n",
+    "\n",
+    "torch.cuda.empty_cache()\n",
+    "\n",
+    "\n",
+    "videosFolder = \"C:\\\\Users\\\\Windows\\\\Dropbox\\\\Echo Research\\\\CodeBase\\\\View Classification\\\\AppearsA4c\\\\Resized2\"\n",
+    "\n",
+    "def collate_fn(x):\n",
+    "    x, f = zip(*x)\n",
+    "    i = list(map(lambda t: t.shape[1], x))\n",
+    "    x = torch.as_tensor(np.swapaxes(np.concatenate(x, 1), 0, 1))\n",
+    "    return x, f, i\n",
+    "\n",
+    "dataloader = torch.utils.data.DataLoader(echonet.datasets.Echo(split=\"external_test\", external_test_location = videosFolder, target_type=[\"Filename\"], length=None, period=1, mean=mean, std=std),\n",
+    "                                         batch_size=10, num_workers=0, shuffle=False, pin_memory=(device.type == \"cuda\"), collate_fn=collate_fn)\n",
+    "if not all([os.path.isfile(os.path.join(destinationFolder, \"labels\", os.path.splitext(f)[0] + \".npy\")) for f in dataloader.dataset.fnames]):\n",
+    "    # Save segmentations for all frames\n",
+    "    # Only run if missing files\n",
+    "\n",
+    "    pathlib.Path(os.path.join(destinationFolder, \"labels\")).mkdir(parents=True, exist_ok=True)\n",
+    "    block = 1024\n",
+    "    model.eval()\n",
+    "\n",
+    "    with torch.no_grad():\n",
+    "        for (x, f, i) in tqdm.tqdm(dataloader):\n",
+    "            x = x.to(device)\n",
+    "            y = np.concatenate([model(x[i:(i + block), :, :, :])[\"out\"].detach().cpu().numpy() for i in range(0, x.shape[0], block)]).astype(np.float16)\n",
+    "            start = 0\n",
+    "            for (filename, offset) in zip(f, i):\n",
+    "                np.save(os.path.join(destinationFolder, \"labels\", os.path.splitext(filename)[0]), y[start:(start + offset), 0, :, :])\n",
+    "                start += offset\n",
+    "                \n",
+    "dataloader = torch.utils.data.DataLoader(echonet.datasets.Echo(split=\"external_test\", external_test_location = videosFolder, target_type=[\"Filename\"], length=None, period=1, segmentation=os.path.join(destinationFolder, \"labels\")),\n",
+    "                                         batch_size=1, num_workers=8, shuffle=False, pin_memory=False)\n",
+    "if not all(os.path.isfile(os.path.join(destinationFolder, \"videos\", f)) for f in dataloader.dataset.fnames):\n",
+    "    pathlib.Path(os.path.join(destinationFolder, \"videos\")).mkdir(parents=True, exist_ok=True)\n",
+    "    pathlib.Path(os.path.join(destinationFolder, \"size\")).mkdir(parents=True, exist_ok=True)\n",
+    "    echonet.utils.latexify()\n",
+    "    with open(os.path.join(destinationFolder, \"size.csv\"), \"w\") as g:\n",
+    "        g.write(\"Filename,Frame,Size,ComputerSmall\\n\")\n",
+    "        for (x, filename) in tqdm.tqdm(dataloader):\n",
+    "            x = x.numpy()\n",
+    "            for i in range(len(filename)):\n",
+    "                img = x[i, :, :, :, :].copy()\n",
+    "                logit = img[2, :, :, :].copy()\n",
+    "                img[1, :, :, :] = img[0, :, :, :]\n",
+    "                img[2, :, :, :] = img[0, :, :, :]\n",
+    "                img = np.concatenate((img, img), 3)\n",
+    "                img[0, :, :, 112:] = np.maximum(255. * (logit > 0), img[0, :, :, 112:])\n",
+    "\n",
+    "                img = np.concatenate((img, np.zeros_like(img)), 2)\n",
+    "                size = (logit > 0).sum(2).sum(1)\n",
+    "                try:\n",
+    "                    trim_min = sorted(size)[round(len(size) ** 0.05)]\n",
+    "                except:\n",
+    "                    import code; code.interact(local=dict(globals(), **locals()))\n",
+    "                trim_max = sorted(size)[round(len(size) ** 0.95)]\n",
+    "                trim_range = trim_max - trim_min\n",
+    "                peaks = set(scipy.signal.find_peaks(-size, distance=20, prominence=(0.50 * trim_range))[0])\n",
+    "                for (x, y) in enumerate(size):\n",
+    "                    g.write(\"{},{},{},{}\\n\".format(filename[0], x, y, 1 if x in peaks else 0))\n",
+    "                fig = plt.figure(figsize=(size.shape[0] / 50 * 1.5, 3))\n",
+    "                plt.scatter(np.arange(size.shape[0]) / 50, size, s=1)\n",
+    "                ylim = plt.ylim()\n",
+    "                for p in peaks:\n",
+    "                    plt.plot(np.array([p, p]) / 50, ylim, linewidth=1)\n",
+    "                plt.ylim(ylim)\n",
+    "                plt.title(os.path.splitext(filename[i])[0])\n",
+    "                plt.xlabel(\"Seconds\")\n",
+    "                plt.ylabel(\"Size (pixels)\")\n",
+    "                plt.tight_layout()\n",
+    "                plt.savefig(os.path.join(destinationFolder, \"size\", os.path.splitext(filename[i])[0] + \".pdf\"))\n",
+    "                plt.close(fig)\n",
+    "                size -= size.min()\n",
+    "                size = size / size.max()\n",
+    "                size = 1 - size\n",
+    "                for (x, y) in enumerate(size):\n",
+    "                    img[:, :, int(round(115 + 100 * y)), int(round(x / len(size) * 200 + 10))] = 255.\n",
+    "                    interval = np.array([-3, -2, -1, 0, 1, 2, 3])\n",
+    "                    for a in interval:\n",
+    "                        for b in interval:\n",
+    "                            img[:, x, a + int(round(115 + 100 * y)), b + int(round(x / len(size) * 200 + 10))] = 255.\n",
+    "                    if x in peaks:\n",
+    "                        img[:, :, 200:225, b + int(round(x / len(size) * 200 + 10))] = 255.\n",
+    "                echonet.utils.savevideo(os.path.join(destinationFolder, \"videos\", filename[i]), img.astype(np.uint8), 50)                "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}