<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta name="generator" content="Docutils 0.18.1: http://docutils.sourceforge.net/" />
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Multiple-Instance Learning (MIL) &mdash; slideflow 3.0.0 documentation</title>
<link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
<!-- <link rel="stylesheet" href="../_static/pygments.css" type="text/css" /> -->
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
<link rel="index" title="Index" href="../genindex/" />
<link rel="search" title="Search" href="../search/" />
<link rel="next" title="Self-Supervised Learning (SSL)" href="../ssl/" />
<link rel="prev" title="Generating Features" href="../features/" />
<script src="../_static/js/modernizr.min.js"></script>
<!-- Preload the theme fonts -->
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<!-- Preload the katex fonts -->
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.15.2/css/all.css" integrity="sha384-vSIIfh2YWi9wW0r9iZe7RJPrKwp6bG+s9QZMoITbCckVJqGCCRhc+ccxNcdpHuYu" crossorigin="anonymous">
<script defer data-domain="slideflow.dev" src="https://plausible.io/js/script.js"></script>
</head>
<div class="container-fluid header-holder tutorials-header" id="header-holder">
<div class="container">
<div class="header-container">
<a class="header-logo" href="https://slideflow.dev" aria-label="Slideflow"></a>
<div class="main-menu">
<ul>
<li class="active">
<a href="https://slideflow.dev">Docs</a>
</li>
<li>
<a href="https://slideflow.dev/tutorial1/">Tutorials</a>
</li>
<li>
<a href="https://github.com/slideflow/slideflow">GitHub</a>
</li>
</ul>
</div>
<a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
</div>
</div>
</div>
<body class="pytorch-body">
<div class="table-of-contents-link-wrapper">
<span>Table of Contents</span>
<a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
</div>
<nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
<div class="pytorch-side-scroll">
<div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
<div class="pytorch-left-menu-search">
<div class="version">
3.0
</div>
<div role="search">
<form id="rtd-search-form" class="wy-form" action="../search/" method="get">
<input type="text" name="q" placeholder="Search Docs" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
</div>
<p class="caption" role="heading"><span class="caption-text">Introduction</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../installation/">Installation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../overview/">Overview</a></li>
<li class="toctree-l1"><a class="reference internal" href="../quickstart/">Quickstart</a></li>
<li class="toctree-l1"><a class="reference internal" href="../project_setup/">Setting up a Project</a></li>
<li class="toctree-l1"><a class="reference internal" href="../datasets_and_val/">Datasets</a></li>
<li class="toctree-l1"><a class="reference internal" href="../slide_processing/">Slide Processing</a></li>
<li class="toctree-l1"><a class="reference internal" href="../training/">Training</a></li>
<li class="toctree-l1"><a class="reference internal" href="../evaluation/">Evaluation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../posthoc/">Layer Activations</a></li>
<li class="toctree-l1"><a class="reference internal" href="../uq/">Uncertainty Quantification</a></li>
<li class="toctree-l1"><a class="reference internal" href="../features/">Generating Features</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">Multiple-Instance Learning (MIL)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../ssl/">Self-Supervised Learning (SSL)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../stylegan/">Generative Networks (GANs)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../saliency/">Saliency Maps</a></li>
<li class="toctree-l1"><a class="reference internal" href="../segmentation/">Tissue Segmentation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../cellseg/">Cell Segmentation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../custom_loops/">Custom Training Loops</a></li>
<li class="toctree-l1"><a class="reference internal" href="../studio/">Slideflow Studio: Live Visualization</a></li>
<li class="toctree-l1"><a class="reference internal" href="../troubleshooting/">Troubleshooting</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Developer Notes</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../tfrecords/">TFRecords: Reading and Writing</a></li>
<li class="toctree-l1"><a class="reference internal" href="../dataloaders/">Dataloaders: Sampling and Augmentation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../custom_extractors/">Custom Feature Extractors</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tile_labels/">Strong Supervision with Tile Labels</a></li>
<li class="toctree-l1"><a class="reference internal" href="../plugins/">Creating a Slideflow Plugin</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../slideflow/">slideflow</a></li>
<li class="toctree-l1"><a class="reference internal" href="../project/">slideflow.Project</a></li>
<li class="toctree-l1"><a class="reference internal" href="../dataset/">slideflow.Dataset</a></li>
<li class="toctree-l1"><a class="reference internal" href="../dataset_features/">slideflow.DatasetFeatures</a></li>
<li class="toctree-l1"><a class="reference internal" href="../heatmap/">slideflow.Heatmap</a></li>
<li class="toctree-l1"><a class="reference internal" href="../model_params/">slideflow.ModelParams</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mosaic/">slideflow.Mosaic</a></li>
<li class="toctree-l1"><a class="reference internal" href="../slidemap/">slideflow.SlideMap</a></li>
<li class="toctree-l1"><a class="reference internal" href="../biscuit/">slideflow.biscuit</a></li>
<li class="toctree-l1"><a class="reference internal" href="../slideflow_cellseg/">slideflow.cellseg</a></li>
<li class="toctree-l1"><a class="reference internal" href="../io/">slideflow.io</a></li>
<li class="toctree-l1"><a class="reference internal" href="../io_tensorflow/">slideflow.io.tensorflow</a></li>
<li class="toctree-l1"><a class="reference internal" href="../io_torch/">slideflow.io.torch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../gan/">slideflow.gan</a></li>
<li class="toctree-l1"><a class="reference internal" href="../grad/">slideflow.grad</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mil_module/">slideflow.mil</a></li>
<li class="toctree-l1"><a class="reference internal" href="../model/">slideflow.model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../model_tensorflow/">slideflow.model.tensorflow</a></li>
<li class="toctree-l1"><a class="reference internal" href="../model_torch/">slideflow.model.torch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../norm/">slideflow.norm</a></li>
<li class="toctree-l1"><a class="reference internal" href="../simclr/">slideflow.simclr</a></li>
<li class="toctree-l1"><a class="reference internal" href="../slide/">slideflow.slide</a></li>
<li class="toctree-l1"><a class="reference internal" href="../slide_qc/">slideflow.slide.qc</a></li>
<li class="toctree-l1"><a class="reference internal" href="../stats/">slideflow.stats</a></li>
<li class="toctree-l1"><a class="reference internal" href="../util/">slideflow.util</a></li>
<li class="toctree-l1"><a class="reference internal" href="../studio_module/">slideflow.studio</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Tutorials</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../tutorial1/">Tutorial 1: Model training (simple)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorial2/">Tutorial 2: Model training (advanced)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorial3/">Tutorial 3: Using a custom architecture</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorial4/">Tutorial 4: Model evaluation &amp; heatmaps</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorial5/">Tutorial 5: Creating a mosaic map</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorial6/">Tutorial 6: Custom slide filtering</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorial7/">Tutorial 7: Training with custom augmentations</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorial8/">Tutorial 8: Multiple-Instance Learning</a></li>
</ul>
</div>
</div>
</nav>
<div class="pytorch-container">
<div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
<div class="pytorch-breadcrumbs-wrapper">
<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="pytorch-breadcrumbs">
<li>
<a href="../">
Docs
</a> &gt;
</li>
<li>Multiple-Instance Learning (MIL)</li>
<li class="pytorch-breadcrumbs-aside">
<a href="../_sources/mil.rst.txt" rel="nofollow"><img src="../_static/images/view-page-source-icon.svg"></a>
</li>
</ul>
</div>
</div>
<div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
Shortcuts
</div>
</div>
<section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
<div class="pytorch-content-left">
<div class="rst-content">
<div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
<article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
<section id="multiple-instance-learning-mil">
<span id="mil"></span><h1>Multiple-Instance Learning (MIL)<a class="headerlink" href="#multiple-instance-learning-mil" title="Permalink to this heading"></a></h1>
<p>In addition to standard tile-based neural networks, Slideflow also supports training multiple-instance learning (MIL) models. Several architectures are available, including <a class="reference external" href="https://github.com/AMLab-Amsterdam/AttentionDeepMIL">attention-based MIL</a> (<code class="docutils literal notranslate"><span class="pre">&quot;Attention_MIL&quot;</span></code>), <a class="reference external" href="https://github.com/mahmoodlab/CLAM">CLAM</a> (<code class="docutils literal notranslate"><span class="pre">&quot;CLAM_SB&quot;</span></code>, <code class="docutils literal notranslate"><span class="pre">&quot;CLAM_MB&quot;</span></code>, <code class="docutils literal notranslate"><span class="pre">&quot;MIL_fc&quot;</span></code>, <code class="docutils literal notranslate"><span class="pre">&quot;MIL_fc_mc&quot;</span></code>), <a class="reference external" href="https://github.com/szc19990412/TransMIL">TransMIL</a> (<code class="docutils literal notranslate"><span class="pre">&quot;TransMIL&quot;</span></code>), and <a class="reference external" href="https://github.com/peng-lab/HistoBistro">HistoBistro Transformer</a> (<code class="docutils literal notranslate"><span class="pre">&quot;bistro.transformer&quot;</span></code>). Custom architectures can also be trained. MIL training requires PyTorch.</p>
<p>Skip to <a class="reference internal" href="../tutorial8/#tutorial8"><span class="std std-ref">Tutorial 8: Multiple-Instance Learning</span></a> for a complete example of MIL training.</p>
<p>See <a class="reference internal" href="../mil_module/#mil-api"><span class="std std-ref">slideflow.mil</span></a> for more information on the MIL API.</p>
<section id="generating-features">
<h2>Generating Features<a class="headerlink" href="#generating-features" title="Permalink to this heading"></a></h2>
<p>The first step in MIL model development is generating features from image tiles, as discussed in the <a class="reference internal" href="../features/#features"><span class="std std-ref">Generating Features</span></a> section. Features from whole-slide images are exported as “bags” of features, where each bag contains a set of features from a single slide. Each bag is a PyTorch tensor saved in <code class="docutils literal notranslate"><span class="pre">*.pt</span></code> format. Bags are saved in a directory, and the directory path is passed to the MIL model during training and evaluation.</p>
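<p>To sanity-check exported bags, each <code class="docutils literal notranslate"><span class="pre">*.pt</span></code> file can be loaded directly with PyTorch. The sketch below is illustrative (the bag path and shape are placeholders); each bag should have shape <code class="docutils literal notranslate"><span class="pre">(num_tiles,</span> <span class="pre">num_features)</span></code>:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>import torch

# Load a single exported feature bag (path is a placeholder).
bag = torch.load('/path/to/bag_directory/slide1.pt')

# One row of features per extracted tile.
print(bag.shape)  # e.g., torch.Size([2048, 1024])
</pre></div>
</div>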
</section>
<section id="training">
<h2>Training<a class="headerlink" href="#training" title="Permalink to this heading"></a></h2>
<section id="model-configuration">
<h3>Model Configuration<a class="headerlink" href="#model-configuration" title="Permalink to this heading"></a></h3>
<p>To train an MIL model using exported features, first prepare an MIL configuration using <a class="reference internal" href="../mil_module/#slideflow.mil.mil_config" title="slideflow.mil.mil_config"><code class="xref py py-func docutils literal notranslate"><span class="pre">slideflow.mil.mil_config()</span></code></a>.</p>
<p>The first argument to this function is the model architecture (which can be a name or a custom <code class="docutils literal notranslate"><span class="pre">torch.nn.Module</span></code> model), and the remaining arguments are used to configure the training process, such as learning rate and number of epochs. Training is executed using <a class="reference external" href="https://docs.fast.ai/">FastAI</a> with <a class="reference external" href="https://arxiv.org/abs/1803.09820">1cycle learning rate scheduling</a>.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">slideflow</span> <span class="k">as</span> <span class="nn">sf</span>
<span class="kn">from</span> <span class="nn">slideflow.mil</span> <span class="kn">import</span> <span class="n">mil_config</span>
<span class="n">config</span> <span class="o">=</span> <span class="n">mil_config</span><span class="p">(</span><span class="s1">&#39;attention_mil&#39;</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="mf">1e-3</span><span class="p">)</span>
</pre></div>
</div>
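<p>Additional trainer parameters can be passed as keyword arguments. A hedged sketch follows; the argument names used here (<code class="docutils literal notranslate"><span class="pre">epochs</span></code>, <code class="docutils literal notranslate"><span class="pre">batch_size</span></code>, <code class="docutils literal notranslate"><span class="pre">bag_size</span></code>, <code class="docutils literal notranslate"><span class="pre">fit_one_cycle</span></code>) are assumptions based on the FastAI trainer and should be verified against the <code class="docutils literal notranslate"><span class="pre">mil_config()</span></code> API reference:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span># A fuller configuration sketch. The keyword names below are
# assumptions; check the mil_config() documentation before use.
config = mil_config(
    'attention_mil',
    lr=1e-3,
    epochs=20,
    batch_size=32,
    bag_size=512,
    fit_one_cycle=True
)
</pre></div>
</div>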
<p>Available models out-of-the-box include <a class="reference external" href="https://github.com/AMLab-Amsterdam/AttentionDeepMIL">attention-based MIL</a> (<code class="docutils literal notranslate"><span class="pre">&quot;Attention_MIL&quot;</span></code>), <a class="reference external" href="https://github.com/szc19990412/TransMIL">transformer MIL</a> (<code class="docutils literal notranslate"><span class="pre">&quot;TransMIL&quot;</span></code>), and <a class="reference external" href="https://github.com/peng-lab/HistoBistro">HistoBistro Transformer</a> (<code class="docutils literal notranslate"><span class="pre">&quot;bistro.transformer&quot;</span></code>). <a class="reference external" href="https://github.com/mahmoodlab/CLAM">CLAM</a> (<code class="docutils literal notranslate"><span class="pre">&quot;CLAM_SB&quot;</span></code>, <code class="docutils literal notranslate"><span class="pre">&quot;CLAM_MB&quot;</span></code>, <code class="docutils literal notranslate"><span class="pre">&quot;MIL_fc&quot;</span></code>, <code class="docutils literal notranslate"><span class="pre">&quot;MIL_fc_mc&quot;</span></code>) models are available through <code class="docutils literal notranslate"><span class="pre">slideflow-gpl</span></code>:</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>pip<span class="w"> </span>install<span class="w"> </span>slideflow-gpl
</pre></div>
</div>
<p>Custom MIL models can also be trained with this API, as discussed <a class="reference internal" href="#custom-mil">below</a>.</p>
</section>
<section id="classification-regression">
<h3>Classification &amp; Regression<a class="headerlink" href="#classification-regression" title="Permalink to this heading"></a></h3>
<p>MIL models can be trained for both classification and regression tasks. The type of outcome is determined through the loss function, which defaults to <code class="docutils literal notranslate"><span class="pre">&quot;cross_entropy&quot;</span></code>. To train a model for regression, set the loss function to one of the following regression losses, and ensure that your outcome labels are continuous. You can also train to multiple outcomes by passing a list of outcome names.</p>
<ul class="simple">
<li><p><strong>“mse”</strong> (<code class="docutils literal notranslate"><span class="pre">nn.MSELoss</span></code>): Mean squared error.</p></li>
<li><p><strong>“mae”</strong> (<code class="docutils literal notranslate"><span class="pre">nn.L1Loss</span></code>): Mean absolute error.</p></li>
<li><p><strong>“huber”</strong> (<code class="docutils literal notranslate"><span class="pre">nn.SmoothL1Loss</span></code>): Huber loss.</p></li>
</ul>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="c1"># Prepare a regression-compatible MIL configuration</span>
<span class="n">config</span> <span class="o">=</span> <span class="n">mil_config</span><span class="p">(</span><span class="s1">&#39;attention_mil&#39;</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="mf">1e-3</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;mse&#39;</span><span class="p">)</span>
<span class="c1"># Train the model</span>
<span class="n">project</span><span class="o">.</span><span class="n">train_mil</span><span class="p">(</span>
<span class="n">config</span><span class="o">=</span><span class="n">config</span><span class="p">,</span>
<span class="o">...</span><span class="p">,</span>
<span class="n">outcomes</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;age&#39;</span><span class="p">,</span> <span class="s1">&#39;grade&#39;</span><span class="p">]</span>
<span class="p">)</span>
</pre></div>
</div>
</section>
<section id="training-an-mil-model">
<h3>Training an MIL Model<a class="headerlink" href="#training-an-mil-model" title="Permalink to this heading"></a></h3>
<p>Next, prepare a <a class="reference internal" href="../datasets_and_val/#datasets-and-validation"><span class="std std-ref">training and validation dataset</span></a> and use <a class="reference internal" href="../project/#slideflow.Project.train_mil" title="slideflow.Project.train_mil"><code class="xref py py-func docutils literal notranslate"><span class="pre">slideflow.Project.train_mil()</span></code></a> to start training. For example, to train a model using three-fold cross-validation to the outcome “HPV_status”:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="o">...</span>
<span class="c1"># Prepare a project and dataset</span>
<span class="n">P</span> <span class="o">=</span> <span class="n">sf</span><span class="o">.</span><span class="n">Project</span><span class="p">(</span><span class="o">...</span><span class="p">)</span>
<span class="n">full_dataset</span> <span class="o">=</span> <span class="n">dataset</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">dataset</span><span class="p">(</span><span class="n">tile_px</span><span class="o">=</span><span class="mi">299</span><span class="p">,</span> <span class="n">tile_um</span><span class="o">=</span><span class="mi">302</span><span class="p">)</span>
<span class="c1"># Split the dataset using three-fold, site-preserved cross-validation</span>
<span class="n">splits</span> <span class="o">=</span> <span class="n">full_dataset</span><span class="o">.</span><span class="n">kfold_split</span><span class="p">(</span>
<span class="n">k</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span>
<span class="n">labels</span><span class="o">=</span><span class="s1">&#39;HPV_status&#39;</span><span class="p">,</span>
<span class="n">preserved_site</span><span class="o">=</span><span class="kc">True</span>
<span class="p">)</span>
<span class="c1"># Train on each cross-fold</span>
<span class="k">for</span> <span class="n">train</span><span class="p">,</span> <span class="n">val</span> <span class="ow">in</span> <span class="n">splits</span><span class="p">:</span>
<span class="n">P</span><span class="o">.</span><span class="n">train_mil</span><span class="p">(</span>
<span class="n">config</span><span class="o">=</span><span class="n">config</span><span class="p">,</span>
<span class="n">outcomes</span><span class="o">=</span><span class="s1">&#39;HPV_status&#39;</span><span class="p">,</span>
<span class="n">train_dataset</span><span class="o">=</span><span class="n">train</span><span class="p">,</span>
<span class="n">val_dataset</span><span class="o">=</span><span class="n">val</span><span class="p">,</span>
<span class="n">bags</span><span class="o">=</span><span class="s1">&#39;/path/to/bag_directory&#39;</span>
<span class="p">)</span>
</pre></div>
</div>
<p>Model training statistics, including validation performance (AUROC, AP) and predictions on the validation dataset, will be saved in an <code class="docutils literal notranslate"><span class="pre">mil</span></code> subfolder within the main project directory.</p>
<p>If you are training an attention-based MIL model (<code class="docutils literal notranslate"><span class="pre">attention_mil</span></code>, <code class="docutils literal notranslate"><span class="pre">clam_sb</span></code>, <code class="docutils literal notranslate"><span class="pre">clam_mb</span></code>), heatmaps of attention can be generated for each slide in the validation dataset by using the argument <code class="docutils literal notranslate"><span class="pre">attention_heatmaps=True</span></code>. You can customize these heatmaps with <code class="docutils literal notranslate"><span class="pre">interpolation</span></code> and <code class="docutils literal notranslate"><span class="pre">cmap</span></code> arguments to control the heatmap interpolation and colormap, respectively.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="c1"># Generate attention heatmaps,</span>
<span class="c1"># using the &#39;magma&#39; colormap and no interpolation.</span>
<span class="n">P</span><span class="o">.</span><span class="n">train_mil</span><span class="p">(</span>
<span class="n">attention_heatmaps</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">cmap</span><span class="o">=</span><span class="s1">&#39;magma&#39;</span><span class="p">,</span>
<span class="n">interpolation</span><span class="o">=</span><span class="kc">None</span>
<span class="p">)</span>
</pre></div>
</div>
<p>Hyperparameters, model configuration, and feature extractor information are logged to <code class="docutils literal notranslate"><span class="pre">mil_params.json</span></code> in the model directory. This file also contains information about the input and output shapes of the MIL network and outcome labels. An example file is shown below.</p>
<div class="highlight-json notranslate"><div class="highlight"><pre><span></span><span class="p">{</span>
<span class="w"> </span><span class="nt">&quot;trainer&quot;</span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;fastai&quot;</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;params&quot;</span><span class="p">:</span><span class="w"> </span><span class="p">{</span>
<span class="w"> </span><span class="p">},</span>
<span class="w"> </span><span class="nt">&quot;outcomes&quot;</span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;histology&quot;</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;outcome_labels&quot;</span><span class="p">:</span><span class="w"> </span><span class="p">{</span>
<span class="w"> </span><span class="nt">&quot;0&quot;</span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;Adenocarcinoma&quot;</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;1&quot;</span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;Squamous&quot;</span>
<span class="w"> </span><span class="p">},</span>
<span class="w"> </span><span class="nt">&quot;bags&quot;</span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;/mnt/data/projects/example_project/bags/simclr-263510/&quot;</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;input_shape&quot;</span><span class="p">:</span><span class="w"> </span><span class="mi">1024</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;output_shape&quot;</span><span class="p">:</span><span class="w"> </span><span class="mi">2</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;bags_encoder&quot;</span><span class="p">:</span><span class="w"> </span><span class="p">{</span>
<span class="w"> </span><span class="nt">&quot;extractor&quot;</span><span class="p">:</span><span class="w"> </span><span class="p">{</span>
<span class="w"> </span><span class="nt">&quot;class&quot;</span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;slideflow.model.extractors.simclr.SimCLR_Features&quot;</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;kwargs&quot;</span><span class="p">:</span><span class="w"> </span><span class="p">{</span>
<span class="w"> </span><span class="nt">&quot;center_crop&quot;</span><span class="p">:</span><span class="w"> </span><span class="kc">false</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;ckpt&quot;</span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;/mnt/data/projects/example_project/simclr/00001-EXAMPLE/ckpt-263510.ckpt&quot;</span>
<span class="w"> </span><span class="p">}</span>
<span class="w"> </span><span class="p">},</span>
<span class="w"> </span><span class="nt">&quot;normalizer&quot;</span><span class="p">:</span><span class="w"> </span><span class="kc">null</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;num_features&quot;</span><span class="p">:</span><span class="w"> </span><span class="mi">1024</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;tile_px&quot;</span><span class="p">:</span><span class="w"> </span><span class="mi">299</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;tile_um&quot;</span><span class="p">:</span><span class="w"> </span><span class="mi">302</span>
<span class="w"> </span><span class="p">}</span>
<span class="p">}</span>
</pre></div>
</div>
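<p>Because <code class="docutils literal notranslate"><span class="pre">mil_params.json</span></code> records the expected input shape and outcome labels, it can be read back to verify a model directory before inference. A minimal sketch (the model path is a placeholder):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>import json
import os

# Inspect a saved MIL model directory (path is a placeholder).
model_dir = '/path/to/mil_model'
with open(os.path.join(model_dir, 'mil_params.json')) as f:
    params = json.load(f)

# Feature dimension the model expects, and the outcome labels.
print(params['input_shape'])     # e.g., 1024
print(params['outcome_labels'])  # e.g., {'0': 'Adenocarcinoma', ...}
</pre></div>
</div>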
</section>
<section id="multi-magnification-mil">
<span id="multimag"></span><h3>Multi-Magnification MIL<a class="headerlink" href="#multi-magnification-mil" title="Permalink to this heading"></a></h3>
<p>Slideflow 2.2 introduced a multi-magnification, multi-modal MIL model, <code class="docutils literal notranslate"><span class="pre">MultiModal_Attention_MIL</span></code> (<code class="docutils literal notranslate"><span class="pre">&quot;mm_attention_mil&quot;</span></code>). This late-fusion multimodal model is based on standard attention-based MIL, but accepts multiple input modalities (e.g., multiple magnifications) simultaneously. Each input modality is processed by a separate encoder network and a separate attention module. The attention-weighted features from each modality are then concatenated and passed to a fully-connected layer.</p>
<p>Multimodal models are trained using the same API as standard MIL models. Modalities are specified using the <code class="docutils literal notranslate"><span class="pre">bags</span></code> argument to <a class="reference internal" href="../project/#slideflow.Project.train_mil" title="slideflow.Project.train_mil"><code class="xref py py-func docutils literal notranslate"><span class="pre">slideflow.Project.train_mil()</span></code></a>, where the number of modes is determined by the number of bag directories provided. Within each bag directory, bags should be generated using the same feature extractor and at the same magnification, but feature extractors and magnifications can vary between bag directories.</p>
<p>For example, to train a multimodal model using two magnifications, you would pass two bag paths to the model. In this case, the <code class="docutils literal notranslate"><span class="pre">/path/to/bags_10x</span></code> directory contains bags generated from a 10x feature extractor, and the <code class="docutils literal notranslate"><span class="pre">/path/to/bags_40x</span></code> directory contains bags generated from a 40x feature extractor.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="c1"># Configure a multimodal MIL model.</span>
<span class="n">config</span> <span class="o">=</span> <span class="n">mil_config</span><span class="p">(</span><span class="s1">&#39;mm_attention_mil&#39;</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="mf">1e-4</span><span class="p">)</span>
<span class="c1"># Set the bags paths for each modality.</span>
<span class="n">bags_10x</span> <span class="o">=</span> <span class="s1">&#39;/path/to/bags_10x&#39;</span>
<span class="n">bags_40x</span> <span class="o">=</span> <span class="s1">&#39;/path/to/bags_40x&#39;</span>
<span class="n">P</span><span class="o">.</span><span class="n">train_mil</span><span class="p">(</span>
<span class="n">config</span><span class="o">=</span><span class="n">config</span><span class="p">,</span>
<span class="n">outcomes</span><span class="o">=</span><span class="s1">&#39;HPV_status&#39;</span><span class="p">,</span>
<span class="n">train_dataset</span><span class="o">=</span><span class="n">train</span><span class="p">,</span>
<span class="n">val_dataset</span><span class="o">=</span><span class="n">val</span><span class="p">,</span>
<span class="n">bags</span><span class="o">=</span><span class="p">[</span><span class="n">bags_10x</span><span class="p">,</span> <span class="n">bags_40x</span><span class="p">]</span>
<span class="p">)</span>
</pre></div>
</div>
<p>You can use any number of modalities, and the feature extractors for each modality can be different. For example, you could train a multimodal model using features from a custom SimCLR model at 5x and features from a pretrained CTransPath model at 20x.</p>
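<p>As a hedged sketch of that scenario (the paths, SimCLR checkpoint, and tile sizes below are placeholders), bags for each modality are generated separately and then passed together at training:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span># Build one extractor per modality; the checkpoint path and
# bag directories below are placeholders.
simclr = sf.build_feature_extractor('simclr', ckpt='/path/to/ckpt.ckpt')
ctranspath = sf.build_feature_extractor('ctranspath')

# Extract features at different magnifications.
ds_5x = P.dataset(tile_px=299, tile_um='5x')
ds_20x = P.dataset(tile_px=299, tile_um='20x')
sf.DatasetFeatures(simclr, dataset=ds_5x).to_torch('/path/to/bags_5x')
sf.DatasetFeatures(ctranspath, dataset=ds_20x).to_torch('/path/to/bags_20x')

# Train with both modalities.
P.train_mil(
    config=config,
    outcomes='HPV_status',
    train_dataset=train,
    val_dataset=val,
    bags=['/path/to/bags_5x', '/path/to/bags_20x']
)
</pre></div>
</div>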
<p>The feature extractors used for each modality, as specified in the <code class="docutils literal notranslate"><span class="pre">bags_config.json</span></code> files in the bag directories, will be logged in the final <code class="docutils literal notranslate"><span class="pre">mil_params.json</span></code> file. Multimodal MIL models can be interactively viewed in <a class="reference internal" href="../studio/#studio"><span class="std std-ref">Slideflow Studio</span></a>, allowing you to visualize the attention weights for each modality separately.</p>
</section>
<section id="custom-architectures">
<span id="custom-mil"></span><h3>Custom Architectures<a class="headerlink" href="#custom-architectures" title="Permalink to this heading"></a></h3>
<p>Training custom MIL models is straightforward with Slideflow, particularly if your model adheres to a few simple guidelines (a minimal sketch satisfying them follows this list):</p>
<ul class="simple">
<li><p>Initialized with <code class="docutils literal notranslate"><span class="pre">(num_feats,</span> <span class="pre">num_outputs)</span></code> (e.g., <code class="docutils literal notranslate"><span class="pre">Attention_MIL(768,</span> <span class="pre">2)</span></code>)</p></li>
<li><p>Input is feature bags with shape <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">num_tiles,</span> <span class="pre">num_feats)</span></code>. If the model needs a “lens” input, then the model attribute <code class="docutils literal notranslate"><span class="pre">use_lens</span></code> should be True.</p></li>
<li><p>Has a <code class="docutils literal notranslate"><span class="pre">relocate()</span></code> function that moves the model to detected device/GPU</p></li>
<li><dl class="simple">
<dt>Ability to get attention through one of two methods:</dt><dd><ul>
<li><p><code class="docutils literal notranslate"><span class="pre">forward()</span></code> function includes an optional <code class="docutils literal notranslate"><span class="pre">return_attention</span></code> argument, which if True returns attention scores after model output</p></li>
<li><p>Has a <code class="docutils literal notranslate"><span class="pre">calculate_attention()</span></code> function that returns attention scores</p></li>
</ul>
</dd>
</dl>
</li>
</ul>
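<p>Below is a minimal sketch of a model satisfying these guidelines. It is illustrative only (layer sizes and the attention pooling are arbitrary choices), not the internals of Slideflow's built-in <code class="docutils literal notranslate"><span class="pre">Attention_MIL</span></code>:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>import torch
from torch import nn

class CustomMIL(nn.Module):
    """Minimal attention-MIL sketch following the guidelines above."""

    use_lens = False  # set True if forward() expects a (bags, lens) pair

    def __init__(self, num_feats, num_outputs):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(num_feats, 256), nn.ReLU())
        self.attention = nn.Linear(256, 1)
        self.head = nn.Linear(256, num_outputs)

    def forward(self, bags, return_attention=False):
        # bags: (batch, num_tiles, num_feats)
        h = self.encoder(bags)
        att = torch.softmax(self.attention(h), dim=1)
        pooled = (att * h).sum(dim=1)  # attention-weighted average
        out = self.head(pooled)
        if return_attention:
            return out, att.squeeze(-1)
        return out

    def calculate_attention(self, bags):
        # Attention scores with shape (batch, num_tiles).
        return torch.softmax(self.attention(self.encoder(bags)), dim=1).squeeze(-1)

    def relocate(self):
        # Move the model to the detected device.
        self.to('cuda' if torch.cuda.is_available() else 'cpu')
</pre></div>
</div>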
<p>If the above applies to your model, you can train it simply by passing it as the first argument to <a class="reference internal" href="../mil_module/#slideflow.mil.mil_config" title="slideflow.mil.mil_config"><code class="xref py py-func docutils literal notranslate"><span class="pre">slideflow.mil.mil_config()</span></code></a>.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">slideflow</span> <span class="k">as</span> <span class="nn">sf</span>
<span class="kn">from</span> <span class="nn">slideflow.mil</span> <span class="kn">import</span> <span class="n">mil_config</span>
<span class="kn">from</span> <span class="nn">my_module</span> <span class="kn">import</span> <span class="n">CustomMIL</span>
<span class="n">config</span> <span class="o">=</span> <span class="n">mil_config</span><span class="p">(</span><span class="n">CustomMIL</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="mf">1e-3</span><span class="p">)</span>
</pre></div>
</div>
<p>For larger projects, or if you are designing a plugin/extension for Slideflow, custom models can be registered to facilitate easy creation. If your model adheres to the above guidelines, you can register it for use with the following:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">slideflow.mil</span> <span class="kn">import</span> <span class="n">register_model</span>
<span class="nd">@register_model</span>
<span class="k">def</span> <span class="nf">my_model</span><span class="p">():</span>
<span class="k">return</span> <span class="n">MyModelClass</span>
</pre></div>
</div>
<p>You can then use your model when creating an MIL configuration:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">config</span> <span class="o">=</span> <span class="n">sf</span><span class="o">.</span><span class="n">mil</span><span class="o">.</span><span class="n">mil_config</span><span class="p">(</span><span class="s1">&#39;my_model&#39;</span><span class="p">,</span> <span class="o">...</span><span class="p">)</span>
</pre></div>
</div>
<p>If the above guidelines do <em>not</em> apply to your model, or if you want to customize model logic or functionality, you can supply a custom MIL configuration class that will supervise model building and dataset preparation. Your custom configuration class should inherit <code class="docutils literal notranslate"><span class="pre">slideflow.mil.MILModelConfig</span></code>, and methods in this class can be overridden to provide additional functionality. For example, to create an MIL configuration that uses a custom loss and custom metrics:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">slideflow.mil</span> <span class="kn">import</span> <span class="n">MILModelConfig</span>
<span class="k">class</span> <span class="nc">MyModelConfig</span><span class="p">(</span><span class="n">MILModelConfig</span><span class="p">):</span>
<span class="nd">@property</span>
<span class="k">def</span> <span class="nf">loss_fn</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">return</span> <span class="n">my_custom_loss</span>
<span class="k">def</span> <span class="nf">get_metrics</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">return</span> <span class="p">[</span><span class="n">my_metric1</span><span class="p">,</span> <span class="n">my_metric2</span><span class="p">]</span>
</pre></div>
</div>
<p>When registering your model, specify that it should use your custom configuration:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="nd">@register_model</span><span class="p">(</span><span class="n">config</span><span class="o">=</span><span class="n">MyModelConfig</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">my_model</span><span class="p">():</span>
<span class="k">return</span> <span class="n">MyModelClass</span>
</pre></div>
</div>
<p>For an example of how to utilize model registration and configuration customization, see our <a class="reference external" href="https://github.com/slideflow/slideflow-gpl/blob/main/slideflow_gpl/clam/config.py">CLAM implementation</a> available through <code class="docutils literal notranslate"><span class="pre">slideflow-gpl</span></code>.</p>
</section>
</section>
<section id="evaluation">
<h2>Evaluation<a class="headerlink" href="#evaluation" title="Permalink to this heading"></a></h2>
<p>To evaluate a saved MIL model on an external dataset, first extract features from a dataset, then use <a class="reference internal" href="../project/#slideflow.Project.evaluate_mil" title="slideflow.Project.evaluate_mil"><code class="xref py py-func docutils literal notranslate"><span class="pre">slideflow.Project.evaluate_mil()</span></code></a>, which displays evaluation metrics and returns predictions as a DataFrame.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">slideflow</span> <span class="k">as</span> <span class="nn">sf</span>
<span class="c1"># Prepare a project and dataset</span>
<span class="n">P</span> <span class="o">=</span> <span class="n">sf</span><span class="o">.</span><span class="n">Project</span><span class="p">(</span><span class="o">...</span><span class="p">)</span>
<span class="n">dataset</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">dataset</span><span class="p">(</span><span class="n">tile_px</span><span class="o">=</span><span class="mi">299</span><span class="p">,</span> <span class="n">tile_um</span><span class="o">=</span><span class="mi">302</span><span class="p">)</span>
<span class="c1"># Generate features using CTransPath</span>
<span class="n">ctranspath</span> <span class="o">=</span> <span class="n">sf</span><span class="o">.</span><span class="n">build_feature_extractor</span><span class="p">(</span><span class="s1">&#39;ctranspath&#39;</span><span class="p">,</span> <span class="n">resize</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">features</span> <span class="o">=</span> <span class="n">sf</span><span class="o">.</span><span class="n">DatasetFeatures</span><span class="p">(</span><span class="n">ctranspath</span><span class="p">,</span> <span class="n">dataset</span><span class="o">=</span><span class="n">dataset</span><span class="p">)</span>
<span class="n">features</span><span class="o">.</span><span class="n">to_torch</span><span class="p">(</span><span class="s1">&#39;/path/to/bag_directory&#39;</span><span class="p">)</span>
<span class="c1"># Evaluate a saved MIL model</span>
<span class="n">df</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">evaluate_mil</span><span class="p">(</span>
<span class="s1">&#39;/path/to/saved_model&#39;</span>
<span class="n">outcomes</span><span class="o">=</span><span class="s1">&#39;HPV_status&#39;</span><span class="p">,</span>
<span class="n">dataset</span><span class="o">=</span><span class="n">dataset</span><span class="p">,</span>
<span class="n">bags</span><span class="o">=</span><span class="s1">&#39;/path/to/bag_directory&#39;</span><span class="p">,</span>
<span class="p">)</span>
</pre></div>
</div>
<p>As with training, attention heatmaps can be generated for attention-based MIL models with the argument <code class="docutils literal notranslate"><span class="pre">attention_heatmaps=True</span></code>, and these can be customized using <code class="docutils literal notranslate"><span class="pre">cmap</span></code> and <code class="docutils literal notranslate"><span class="pre">interpolation</span></code> arguments.</p>
<img alt="../_images/att_heatmap.jpg" src="../_images/att_heatmap.jpg" />
</section>
<section id="generating-predictions">
<h2>Generating Predictions<a class="headerlink" href="#generating-predictions" title="Permalink to this heading"></a></h2>
<p>In addition to generating slide-level predictions during training and evaluation, you can also generate tile-level predictions and attention scores for a dataset using <a class="reference internal" href="../mil_module/#slideflow.mil.get_mil_tile_predictions" title="slideflow.mil.get_mil_tile_predictions"><code class="xref py py-func docutils literal notranslate"><span class="pre">slideflow.mil.get_mil_tile_predictions()</span></code></a>. This function returns a DataFrame containing tile-level predictions and attention.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">slideflow.mil</span> <span class="kn">import</span> <span class="n">get_mil_tile_predictions</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">df</span> <span class="o">=</span> <span class="n">get_mil_tile_predictions</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">dataset</span><span class="p">,</span> <span class="n">bags</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">df</span>
<span class="go"> slide loc_x loc_y ... y_pred3 y_pred4 y_pred5</span>
<span class="go">0 TCGA-4V-A9QI-01Z-0... 2210 7349 ... 0.181155 0.468446 0.070175</span>
<span class="go">1 TCGA-4V-A9QI-01Z-0... 5795 1971 ... 0.243721 0.131991 0.009169</span>
<span class="go">2 TCGA-4V-A9QI-01Z-0... 6273 5437 ... 0.096196 0.583367 0.090258</span>
<span class="go">3 TCGA-4V-A9QI-01Z-0... 2330 3047 ... 0.056426 0.264386 0.300199</span>
<span class="go">4 TCGA-4V-A9QI-01Z-0... 3644 3525 ... 0.134535 0.534353 0.013619</span>
<span class="go">... ... ... ... ... ... ... ...</span>
<span class="go">391809 TCGA-4X-A9FA-01Z-0... 6034 3352 ... 0.004119 0.003636 0.005673</span>
<span class="go">391810 TCGA-4X-A9FA-01Z-0... 6643 1401 ... 0.012790 0.010269 0.011726</span>
<span class="go">391811 TCGA-4X-A9FA-01Z-0... 5546 2011 ... 0.009777 0.013556 0.025255</span>
<span class="go">391812 TCGA-4X-A9FA-01Z-0... 6277 2864 ... 0.026638 0.018499 0.031061</span>
<span class="go">391813 TCGA-4X-A9FA-01Z-0... 4083 4205 ... 0.009875 0.009582 0.022125</span>
<span class="go">[391814 rows x 15 columns]</span>
</pre></div>
</div>
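<p>Since the returned DataFrame includes slide names and tile coordinates, tile-level results can be summarized with ordinary pandas operations. A small sketch, assuming <code class="docutils literal notranslate"><span class="pre">df</span></code> from the example above (column names follow the printed frame):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span># Average tile-level predictions per slide.
per_slide = df.groupby('slide')[['y_pred3', 'y_pred4', 'y_pred5']].mean()
print(per_slide.head())
</pre></div>
</div>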
</section>
<section id="single-slide-inference">
<h2>Single-Slide Inference<a class="headerlink" href="#single-slide-inference" title="Permalink to this heading"></a></h2>
<p>Predictions can also be generated for individual slides, without requiring the user to manually generate feature bags. Use <code class="xref py py-func docutils literal notranslate"><span class="pre">slideflow.mil.predict_slide()</span></code> to generate predictions for a single slide. The first argument is the path to the saved MIL model (a directory containing <code class="docutils literal notranslate"><span class="pre">mil_params.json</span></code>), and the second argument can either be a path to a slide or a loaded <code class="xref py py-class docutils literal notranslate"><span class="pre">sf.WSI</span></code> object.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">slideflow.mil</span> <span class="kn">import</span> <span class="n">predict_slide</span>
<span class="kn">from</span> <span class="nn">slideflow.slide</span> <span class="kn">import</span> <span class="n">qc</span>
<span class="c1"># Load a slide and apply Otsu thresholding</span>
<span class="n">slide</span> <span class="o">=</span> <span class="s1">&#39;/path/to/slide.svs&#39;</span>
<span class="n">wsi</span> <span class="o">=</span> <span class="n">sf</span><span class="o">.</span><span class="n">WSI</span><span class="p">(</span><span class="n">slide</span><span class="p">,</span> <span class="n">tile_px</span><span class="o">=</span><span class="mi">299</span><span class="p">,</span> <span class="n">tile_um</span><span class="o">=</span><span class="mi">302</span><span class="p">)</span>
<span class="n">wsi</span><span class="o">.</span><span class="n">qc</span><span class="p">(</span><span class="n">qc</span><span class="o">.</span><span class="n">Otsu</span><span class="p">())</span>
<span class="c1"># Calculate predictions and attention heatmap</span>
<span class="n">model</span> <span class="o">=</span> <span class="s1">&#39;/path/to/mil_model&#39;</span>
<span class="n">y_pred</span><span class="p">,</span> <span class="n">y_att</span> <span class="o">=</span> <span class="n">predict_slide</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">wsi</span><span class="p">)</span>
</pre></div>
</div>
<p>The function will return a tuple of predictions and attention heatmaps. If the model is not attention-based, the attention heatmap will be <code class="docutils literal notranslate"><span class="pre">None</span></code>. To calculate attention for a model, set <code class="docutils literal notranslate"><span class="pre">attention=True</span></code>:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">y_pred</span><span class="p">,</span> <span class="n">y_att</span> <span class="o">=</span> <span class="n">predict_slide</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">slide</span><span class="p">,</span> <span class="n">attention</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</pre></div>
</div>
<p>The returned attention values will be a masked <code class="docutils literal notranslate"><span class="pre">numpy.ndarray</span></code> with the same shape as the slide tile extraction grid. Unused tiles will have masked attention values.</p>
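<p>Masked arrays plot directly with matplotlib, which leaves masked cells unpainted. A quick visualization sketch, assuming <code class="docutils literal notranslate"><span class="pre">y_att</span></code> from the example above:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>import matplotlib.pyplot as plt

# Masked (unused) tiles are left blank by imshow.
plt.imshow(y_att, cmap='inferno')
plt.colorbar(label='Attention')
plt.show()
</pre></div>
</div>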
</section>
<section id="visualizing-predictions">
<h2>Visualizing Predictions<a class="headerlink" href="#visualizing-predictions" title="Permalink to this heading"></a></h2>
<p>Heatmaps of attention and tile-level predictions can be interactively visualized in Slideflow Studio by enabling the Multiple-Instance Learning extension (new in Slideflow 2.1.0). This extension is discussed in more detail in the <a class="reference internal" href="../studio/#extensions"><span class="std std-ref">Extensions</span></a> section.</p>
</section>
</section>
</article>
</div>
<footer>
<div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
<a href="../ssl/" class="btn btn-neutral float-right" title="Self-Supervised Learning (SSL)" accesskey="n" rel="next">Next <img src="../_static/images/chevron-right-orange.svg" class="next-page"></a>
<a href="../features/" class="btn btn-neutral" title="Generating Features" accesskey="p" rel="prev"><img src="../_static/images/chevron-right-orange.svg" class="previous-page"> Previous</a>
</div>
<hr>
<div role="contentinfo">
<p>
&copy; Copyright 2023, James M Dolezal.
</p>
</div>
<div>
Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</div>
</footer>
</div>
</div>
<div class="pytorch-content-right" id="pytorch-content-right">
<div class="pytorch-right-menu" id="pytorch-right-menu">
<div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
<ul>
<li><a class="reference internal" href="#">Multiple-Instance Learning (MIL)</a><ul>
<li><a class="reference internal" href="#generating-features">Generating Features</a></li>
<li><a class="reference internal" href="#training">Training</a><ul>
<li><a class="reference internal" href="#model-configuration">Model Configuration</a></li>
<li><a class="reference internal" href="#classification-regression">Classification &amp; Regression</a></li>
<li><a class="reference internal" href="#training-an-mil-model">Training an MIL Model</a></li>
<li><a class="reference internal" href="#multi-magnification-mil">Multi-Magnification MIL</a></li>
<li><a class="reference internal" href="#custom-architectures">Custom Architectures</a></li>
</ul>
</li>
<li><a class="reference internal" href="#evaluation">Evaluation</a></li>
<li><a class="reference internal" href="#generating-predictions">Generating Predictions</a></li>
<li><a class="reference internal" href="#single-slide-inference">Single-Slide Inference</a></li>
<li><a class="reference internal" href="#visualizing-predictions">Visualizing Predictions</a></li>
</ul>
</li>
</ul>
</div>
</div>
</div>
</section>
</div>
<script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
<script src="../_static/doctools.js"></script>
<script src="../_static/sphinx_highlight.js"></script>
<script type="text/javascript" src="../_static/js/vendor/jquery-3.6.3.min.js"></script>
<script type="text/javascript" src="../_static/js/vendor/popper.min.js"></script>
<script type="text/javascript" src="../_static/js/vendor/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<script type="text/javascript" src="../_static/js/theme.js"></script>
<script type="text/javascript">
jQuery(function () {
SphinxRtdTheme.Navigation.enable(true);
});
</script>
<!-- Begin Footer -->
<!-- End Footer -->
<!-- Begin Mobile Menu -->
<div class="mobile-main-menu">
<div class="container-fluid">
<div class="container">
<div class="mobile-main-menu-header-container">
<a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
<a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu"></a>
</div>
</div>
</div>
<div class="mobile-main-menu-links-container">
<div class="main-menu">
<ul>
<li>
<a href="https://slideflow.dev">Docs</a>
</li>
<li>
<a href="https://slideflow.dev/tutorial1/">Tutorials</a>
</li>
<li>
<a href="https://github.com/slideflow/slideflow">Github</a>
</li>
</ul>
</div>
</div>
</div>
<!-- End Mobile Menu -->
<script type="text/javascript">
var collapsedSections = [];
</script>
<script type="text/javascript" src="../_static/js/vendor/anchor.min.js"></script>
<script type="text/javascript">
$(document).ready(function() {
mobileMenu.bind();
mobileTOC.bind();
pytorchAnchors.bind();
sideMenus.bind();
scrollToAnchor.bind();
highlightNavigation.bind();
mainMenuDropdown.bind();
filterTags.bind();
// Add class to links that have code blocks, since we cannot create links in code blocks
$("article.pytorch-article a span.pre").each(function(e) {
$(this).closest("a").addClass("has-code");
});
})
</script>
</body>
</html>