scripts/InitializationNotebook.ipynb
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# David Ouyang 12/5/2019\n",
    "\n",
    "# Notebook which:\n",
    "# 1. Downloads weights\n",
    "# 2. Initializes model and imports weights\n",
    "# 3. Performs test time evaluation of videos (already preprocessed with ConvertDICOMToAVI.ipynb)\n",
    "\n",
    "import re\n",
    "import os, os.path\n",
    "from os.path import splitext\n",
    "import pydicom as dicom\n",
    "import numpy as np\n",
    "from pydicom.uid import UID, generate_uid\n",
    "import shutil\n",
    "from multiprocessing import dummy as multiprocessing\n",
    "import time\n",
    "import subprocess\n",
    "import datetime\n",
    "from datetime import date\n",
    "import sys\n",
    "import cv2\n",
    "import matplotlib.pyplot as plt\n",
    "import pathlib\n",
    "import tqdm\n",
    "import scipy.signal\n",
    "from shutil import copy\n",
    "import math\n",
    "import torch\n",
    "import torchvision\n",
    "\n",
    "sys.path.append(\"..\")\n",
    "import echonet\n",
    "\n",
    "import wget\n",
    "\n",
    "#destinationFolder = \"/Users/davidouyang/Dropbox/Echo Research/CodeBase/Output\"\n",
    "destinationFolder = \"C:\\\\Users\\\\Windows\\\\Dropbox\\\\Echo Research\\\\CodeBase\\\\Output\"\n",
    "#videosFolder = \"/Users/davidouyang/Dropbox/Echo Research/CodeBase/a4c-video-dir\"\n",
    "videosFolder = \"C:\\\\Users\\\\Windows\\\\Dropbox\\\\Echo Research\\\\CodeBase\\\\a4c-video-dir\"\n",
    "#DestinationForWeights = \"/Users/davidouyang/Dropbox/Echo Research/CodeBase/EchoNetDynamic-Weights\"\n",
    "DestinationForWeights = \"C:\\\\Users\\\\Windows\\\\Dropbox\\\\Echo Research\\\\CodeBase\\\\EchoNetDynamic-Weights\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The weights are at C:\\Users\\Windows\\Dropbox\\Echo Research\\CodeBase\\EchoNetDynamic-Weights\n",
      "Segmentation Weights already present\n",
      "EF Weights already present\n"
     ]
    }
   ],
   "source": [
    "# Download model weights\n",
    "\n",
    "if os.path.exists(DestinationForWeights):\n",
    "    print(\"The weights are at\", DestinationForWeights)\n",
    "else:\n",
    "    print(\"Creating folder at \", DestinationForWeights, \" to store weights\")\n",
    "    os.mkdir(DestinationForWeights)\n",
    "\n",
    "segmentationWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/deeplabv3_resnet50_random.pt'\n",
    "ejectionFractionWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/r2plus1d_18_32_2_pretrained.pt'\n",
    "\n",
    "if not os.path.exists(os.path.join(DestinationForWeights, os.path.basename(segmentationWeightsURL))):\n",
    "    print(\"Downloading Segmentation Weights, \", segmentationWeightsURL, \" to \", os.path.join(DestinationForWeights, os.path.basename(segmentationWeightsURL)))\n",
    "    filename = wget.download(segmentationWeightsURL, out=DestinationForWeights)\n",
    "else:\n",
    "    print(\"Segmentation Weights already present\")\n",
    "\n",
    "if not os.path.exists(os.path.join(DestinationForWeights, os.path.basename(ejectionFractionWeightsURL))):\n",
    "    print(\"Downloading EF Weights, \", ejectionFractionWeightsURL, \" to \", os.path.join(DestinationForWeights, os.path.basename(ejectionFractionWeightsURL)))\n",
    "    filename = wget.download(ejectionFractionWeightsURL, out=DestinationForWeights)\n",
    "else:\n",
    "    print(\"EF Weights already present\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading weights from  C:\\Users\\Windows\\Dropbox\\Echo Research\\CodeBase\\EchoNetDynamic-Weights\\r2plus1d_18_32_2_pretrained\n",
      "cuda is available, original weights\n",
      "external_test ['0X1A05DFFFCAFB253B.avi', '0X1A0A263B22CCD966.avi', '0X1A2A76BDB5B98BED.avi', '0X1A2C60147AF9FDAE.avi', '0X1A2E9496910EFF5B.avi', '0X1A3D565B371DC573.avi', '0X1A3E7BF1DFB132FB.avi', '0X1A5FAE3F9D37794E.avi', '0X1A6ACFE7B286DAFC.avi', '0X1A8D85542DBE8204.avi', '23_Apical_4_chamber_view.dcm.avi', '62_Apical_4_chamber_view.dcm.avi', '64_Apical_4_chamber_view.dcm.avi']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████████████████████████████████████████████████████████████████████████████| 10/10 [00:10<00:00,  1.00s/it]\n",
      "100%|████████████████████████████████████████████████████████| 13/13 [00:29<00:00,  2.26s/it, 3122.29 (3440.26) / 0.00]\n"
     ]
    }
   ],
   "source": [
    "# Initialize and Run EF model\n",
    "\n",
    "frames = 32\n",
    "period = 1 #2\n",
    "batch_size = 20\n",
    "model = torchvision.models.video.r2plus1d_18(pretrained=False)\n",
    "model.fc = torch.nn.Linear(model.fc.in_features, 1)\n",
    "\n",
    "print(\"loading weights from \", os.path.join(DestinationForWeights, \"r2plus1d_18_32_2_pretrained\"))\n",
    "\n",
    "if torch.cuda.is_available():\n",
    "    print(\"cuda is available, original weights\")\n",
    "    device = torch.device(\"cuda\")\n",
    "    model = torch.nn.DataParallel(model)\n",
    "    model.to(device)\n",
    "    checkpoint = torch.load(os.path.join(DestinationForWeights, os.path.basename(ejectionFractionWeightsURL)))\n",
    "    model.load_state_dict(checkpoint['state_dict'])\n",
    "else:\n",
    "    print(\"cuda is not available, cpu weights\")\n",
    "    device = torch.device(\"cpu\")\n",
    "    checkpoint = torch.load(os.path.join(DestinationForWeights, os.path.basename(ejectionFractionWeightsURL)), map_location=\"cpu\")\n",
    "    state_dict_cpu = {k[7:]: v for (k, v) in checkpoint['state_dict'].items()}\n",
    "    model.load_state_dict(state_dict_cpu)\n",
    "\n",
    "# try some random weights: final_r2+1d_model_regression_EF_sgd_skip1_32frames.pth.tar\n",
    "# scp ouyangd@arthur2:~/Echo-Tracing-Analysis/final_r2+1d_model_regression_EF_sgd_skip1_32frames.pth.tar \"C:\\Users\\Windows\\Dropbox\\Echo Research\\CodeBase\\EchoNetDynamic-Weights\"\n",
    "#Weights = \"final_r2+1d_model_regression_EF_sgd_skip1_32frames.pth.tar\"\n",
    "\n",
    "output = os.path.join(destinationFolder, \"cedars_ef_output.csv\")\n",
    "\n",
    "ds = echonet.datasets.Echo(split = \"external_test\", external_test_location = videosFolder, crops=\"all\")\n",
    "print(ds.split, ds.fnames)\n",
    "\n",
    "mean, std = echonet.utils.get_mean_and_std(ds)\n",
    "\n",
    "kwargs = {\"target_type\": \"EF\",\n",
    "          \"mean\": mean,\n",
    "          \"std\": std,\n",
    "          \"length\": frames,\n",
    "          \"period\": period,\n",
    "          }\n",
    "\n",
    "ds = echonet.datasets.Echo(split = \"external_test\", external_test_location = videosFolder, **kwargs, crops=\"all\")\n",
    "\n",
    "test_dataloader = torch.utils.data.DataLoader(ds, batch_size = 1, num_workers = 5, shuffle = True, pin_memory=(device.type == \"cuda\"))\n",
    "loss, yhat, y = echonet.utils.video.run_epoch(model, test_dataloader, \"test\", None, device, save_all=True, blocks=25)\n",
    "\n",
    "with open(output, \"w\") as g:\n",
    "    for (filename, pred) in zip(ds.fnames, yhat):\n",
    "        for (i, p) in enumerate(pred):\n",
    "            g.write(\"{},{},{:.4f}\\n\".format(filename, i, p))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize and Run Segmentation model\n",
    "\n",
    "torch.cuda.empty_cache()\n",
    "\n",
    "videosFolder = \"C:\\\\Users\\\\Windows\\\\Dropbox\\\\Echo Research\\\\CodeBase\\\\View Classification\\\\AppearsA4c\\\\Resized2\"\n",
    "\n",
    "def collate_fn(x):\n",
    "    x, f = zip(*x)\n",
    "    i = list(map(lambda t: t.shape[1], x))\n",
    "    x = torch.as_tensor(np.swapaxes(np.concatenate(x, 1), 0, 1))\n",
    "    return x, f, i\n",
    "\n",
    "dataloader = torch.utils.data.DataLoader(echonet.datasets.Echo(split=\"external_test\", external_test_location = videosFolder, target_type=[\"Filename\"], length=None, period=1, mean=mean, std=std),\n",
    "                                         batch_size=10, num_workers=0, shuffle=False, pin_memory=(device.type == \"cuda\"), collate_fn=collate_fn)\n",
    "if not all([os.path.isfile(os.path.join(destinationFolder, \"labels\", os.path.splitext(f)[0] + \".npy\")) for f in dataloader.dataset.fnames]):\n",
    "    # Save segmentations for all frames\n",
    "    # Only run if missing files\n",
    "\n",
    "    pathlib.Path(os.path.join(destinationFolder, \"labels\")).mkdir(parents=True, exist_ok=True)\n",
    "    block = 1024\n",
    "    model.eval()\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for (x, f, i) in tqdm.tqdm(dataloader):\n",
    "            x = x.to(device)\n",
    "            y = np.concatenate([model(x[i:(i + block), :, :, :])[\"out\"].detach().cpu().numpy() for i in range(0, x.shape[0], block)]).astype(np.float16)\n",
    "            start = 0\n",
    "            for (filename, offset) in zip(f, i):\n",
    "                np.save(os.path.join(destinationFolder, \"labels\", os.path.splitext(filename)[0]), y[start:(start + offset), 0, :, :])\n",
    "                start += offset\n",
    "\n",
    "dataloader = torch.utils.data.DataLoader(echonet.datasets.Echo(split=\"external_test\", external_test_location = videosFolder, target_type=[\"Filename\"], length=None, period=1, segmentation=os.path.join(destinationFolder, \"labels\")),\n",
    "                                         batch_size=1, num_workers=8, shuffle=False, pin_memory=False)\n",
    "if not all(os.path.isfile(os.path.join(destinationFolder, \"videos\", f)) for f in dataloader.dataset.fnames):\n",
    "    pathlib.Path(os.path.join(destinationFolder, \"videos\")).mkdir(parents=True, exist_ok=True)\n",
    "    pathlib.Path(os.path.join(destinationFolder, \"size\")).mkdir(parents=True, exist_ok=True)\n",
    "    echonet.utils.latexify()\n",
    "    with open(os.path.join(destinationFolder, \"size.csv\"), \"w\") as g:\n",
    "        g.write(\"Filename,Frame,Size,ComputerSmall\\n\")\n",
    "        for (x, filename) in tqdm.tqdm(dataloader):\n",
    "            x = x.numpy()\n",
    "            for i in range(len(filename)):\n",
    "                img = x[i, :, :, :, :].copy()\n",
    "                logit = img[2, :, :, :].copy()\n",
    "                img[1, :, :, :] = img[0, :, :, :]\n",
    "                img[2, :, :, :] = img[0, :, :, :]\n",
    "                img = np.concatenate((img, img), 3)\n",
    "                img[0, :, :, 112:] = np.maximum(255. * (logit > 0), img[0, :, :, 112:])\n",
    "\n",
    "                img = np.concatenate((img, np.zeros_like(img)), 2)\n",
    "                size = (logit > 0).sum(2).sum(1)\n",
    "                sorted_size = sorted(size)\n",
    "                trim_min = sorted_size[round(len(size) ** 0.05)]\n",
    "                trim_max = sorted_size[round(len(size) ** 0.95)]\n",
    "                trim_range = trim_max - trim_min\n",
    "                peaks = set(scipy.signal.find_peaks(-size, distance=20, prominence=(0.50 * trim_range))[0])\n",
    "                for (t, s) in enumerate(size):\n",
    "                    g.write(\"{},{},{},{}\\n\".format(filename[0], t, s, 1 if t in peaks else 0))\n",
    "                fig = plt.figure(figsize=(size.shape[0] / 50 * 1.5, 3))\n",
    "                plt.scatter(np.arange(size.shape[0]) / 50, size, s=1)\n",
    "                ylim = plt.ylim()\n",
    "                for p in peaks:\n",
    "                    plt.plot(np.array([p, p]) / 50, ylim, linewidth=1)\n",
    "                plt.ylim(ylim)\n",
    "                plt.title(os.path.splitext(filename[i])[0])\n",
    "                plt.xlabel(\"Seconds\")\n",
    "                plt.ylabel(\"Size (pixels)\")\n",
    "                plt.tight_layout()\n",
    "                plt.savefig(os.path.join(destinationFolder, \"size\", os.path.splitext(filename[i])[0] + \".pdf\"))\n",
    "                plt.close(fig)\n",
    "                size -= size.min()\n",
    "                size = size / size.max()\n",
    "                size = 1 - size\n",
    "                # draw the normalized size trace onto the video: a thin line on every\n",
    "                # frame plus a thicker dot on frame t, and a tick at each detected peak\n",
    "                for (t, s) in enumerate(size):\n",
    "                    img[:, :, int(round(115 + 100 * s)), int(round(t / len(size) * 200 + 10))] = 255.\n",
    "                    interval = np.array([-3, -2, -1, 0, 1, 2, 3])\n",
    "                    for a in interval:\n",
    "                        for b in interval:\n",
    "                            img[:, t, a + int(round(115 + 100 * s)), b + int(round(t / len(size) * 200 + 10))] = 255.\n",
    "                    if t in peaks:\n",
    "                        img[:, :, 200:225, int(round(t / len(size) * 200 + 10))] = 255.\n",
    "                echonet.utils.savevideo(os.path.join(destinationFolder, \"videos\", filename[i]), img.astype(np.uint8), 50)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}