/***
 * See https://github.com/MarkZaidi/QuPath-Image-Alignment/blob/main/Calculate-Transforms.groovy for the most up-to-date version.
 * Please link to the GitHub repo when referencing this code in forums, as the code will be continually updated.
 *
 * Script to align 2 or more images in the project with different pixel sizes, using either intensities or annotations.
 * Run from any image in a project containing all sets of images that require alignment.
 * Writes the affine transform to an object inside the Affine subfolder of your project folder.
 * Also includes a helper (GatherObjects) for transferring detection objects from the template image; this can be changed to annotation objects.
 * Usage:
 * - Load in all sets of images to be aligned. Rename file names such that the only underscore (_) in the image name
 *   separates the SlideID from the stain. Example: N19-1107 30Gy M5_PANEL2.vsi
 * - Adjust the inputs specified under "Needed inputs", and run the script (can run on any image; iterates over the entire project)
 * - If the script errors because alignment fails to converge, set 'align_specific' to the SlideID of the image it failed on
 *   - Set 'skip_image' to 1 and rerun the script to skip over the error-causing image
 *   - Set 'skip_image' to 0, and either adjust 'AutoAlignPixelSize' or draw tissue annotations on all stains of the images in the list
 *   - Run the script and verify that every moving image has a transform file located in the 'Affine' folder
 *
 * Needed inputs:
 * - registrationType : set to "AFFINE" for translations, rotations, scaling, and shearing; set to "RIGID" for only translations and rotations
 * - refStain : stain name of the image to align all other images to
 * - wsiExt : file name extension
 * - align_specific : see above; set to null for the first run through
 * - AutoAlignPixelSize : target pixel size (in µm) at which images are downsampled when calculating the transform.
 *   Greater values result in faster calculation, but may impact quality
 * - skip_image : see above; the value doesn't matter if align_specific is null
 *
 * Script largely adapted from Sara McArdle's callable implementation of QuPath's Interactive Image Alignment, and Yau Mun Lim's method
 * of matching reference (static) and overlay (moving) images based on file names.
 */
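
// Worked example of the naming convention above (illustrative file names only):
// a project containing
//     N19-1107 30Gy M5_PTEN.vsi    <- reference image (refStain = "PTEN")
//     N19-1107 30Gy M5_PANEL2.vsi  <- moving image
// produces one transform file at <project>/Affine/N19-1107 30Gy M5_PANEL2.vsi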

import qupath.lib.objects.PathCellObject
import qupath.lib.objects.PathDetectionObject
import qupath.lib.objects.PathObject
import qupath.lib.objects.PathObjects
import qupath.lib.objects.PathTileObject
import qupath.lib.objects.classes.PathClass
import qupath.lib.objects.classes.PathClassFactory
import qupath.lib.roi.RoiTools
import qupath.lib.roi.interfaces.ROI

import java.awt.geom.AffineTransform
import javafx.scene.transform.Affine
import qupath.lib.images.ImageData
import qupath.lib.images.servers.ImageServer
import qupath.lib.images.servers.LabeledImageServer

import java.awt.Graphics2D
import java.awt.Transparency
import java.awt.color.ColorSpace
import java.awt.image.BufferedImage
import java.awt.image.ComponentColorModel
import java.awt.image.DataBuffer

import org.bytedeco.opencv.global.opencv_core
import org.bytedeco.opencv.global.opencv_video
import org.bytedeco.opencv.opencv_core.Mat
import org.bytedeco.opencv.opencv_core.TermCriteria
import org.bytedeco.javacpp.indexer.FloatIndexer
import org.bytedeco.javacpp.indexer.Indexer

import qupath.lib.gui.dialogs.Dialogs
import qupath.lib.images.servers.PixelCalibration
import qupath.lib.regions.RegionRequest
import qupath.opencv.tools.OpenCVTools

import static qupath.lib.gui.scripting.QPEx.*

// Variables to set
//////////////////////////////////
String registrationType = "AFFINE" // Specify as "RIGID" or "AFFINE"
String refStain = "PTEN" // stain to use as the reference image (all images will be aligned to this)
String wsiExt = ".ndpi" // image name extension
//def align_specific = ['N19-1107 30Gy M5'] // If auto-align on intensity fails, put the image(s) that it fails on here
def AutoAlignPixelSize = 30 // target pixel size (µm) for calculating the transform (tform). Does not affect scaling of the output image
align_specific = null // When referencing an image, just include the slide name (the part before _)
skip_image = 0 // If 1, skips the images defined by 'align_specific'. If 0, skips all but the image(s) in 'align_specific'
// Experimental features
use_single_channel = 0 // Channel number to use from each image for alignment; set to 0 to use all channels
iterations = 5 // Number of times to iteratively calculate the transformation
mov_rotation = 180 // rotation to apply to ALL moving images before calculating alignment. Strongly recommend ensuring proper orientation before loading into QuPath.
decrement_factor = 1.1 // if iterations > 1, the factor by which AutoAlignPixelSize decreases each iteration (increasing alignment resolution). Set to 1 to keep it constant.
/////////////////////////////////
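
// Illustrative schedule with the defaults above: the 5 iterations run at AutoAlignPixelSize
// 30, 27.3, 24.8, 22.5, and 20.5 (each pass divides by decrement_factor = 1.1), so the
// transform is re-estimated from progressively higher-resolution downsamples.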

//Lim's code for file name matching
// Get list of all images in project
def projectImageList = getProject().getImageList()

// Create empty lists
def imageNameList = []
def slideIDList = []
def stainList = []
def missingList = []

// Split image file names to desired variables and add to previously created lists
for (entry in projectImageList) {
    def name = entry.getImageName()
    def (imageName, imageExt) = name.split('\\.')
    def (slideID, stain) = imageName.split('_')
    imageNameList << imageName
    slideIDList << slideID
    stainList << stain
}
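// Example of the split above (assumes exactly one '.' and one '_' in each image name):
// "N19-1107 30Gy M5_PANEL2.vsi" -> imageName = "N19-1107 30Gy M5_PANEL2", imageExt = "vsi"
//                                  slideID = "N19-1107 30Gy M5", stain = "PANEL2"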

// Remove duplicate entries from the lists
slideIDList = slideIDList.unique()
stainList = stainList.unique()
print(slideIDList)
print(align_specific)
// Remove specific entries if they cause alignment not to converge
if (align_specific != null)
    if (skip_image == 1)
        slideIDList.removeAll(align_specific)
    else
        slideIDList.retainAll(align_specific)
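// Example: with align_specific = ['N19-1107 30Gy M5'] and skip_image = 0, only that slide is
// processed (useful for re-running a failed alignment); with skip_image = 1, that slide is
// skipped and all other slides are processed.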

if (stainList.size() == 1) {
    print 'Only one stain detected. Target slides may not be loaded.'
    return
}

// Create the Affine folder to hold the transformation matrix files
path = buildFilePath(PROJECT_BASE_DIR, 'Affine')
mkdirs(path)

// For each slide, align every non-reference stain image to its reference-stain image
for (slide in slideIDList) {
    for (stain in stainList) {
        if (stain != refStain) {
            refFileName = slide + "_" + refStain + wsiExt
            targetFileName = slide + "_" + stain + wsiExt
            path = buildFilePath(PROJECT_BASE_DIR, 'Affine', targetFileName)

            def refImage = projectImageList.find { it.getImageName() == refFileName }
            def targetImage = projectImageList.find { it.getImageName() == targetFileName }
            if (refImage == null) {
                print 'Reference slide ' + refFileName + ' missing!'
                missingList << refFileName
                continue
            }
            if (targetImage == null) {
                print 'Target slide ' + targetFileName + ' missing!'
                missingList << targetFileName
                continue
            }
            println("Aligning target " + targetFileName + " to reference " + refFileName)

            //McArdle's code for image alignment
            def serverBaseMark = refImage.readImageData()       // reference (static) image
            def serverOverlayMark = targetImage.readImageData() // moving image
            Affine affine = [] // Groovy coerces [] to the no-arg constructor, i.e. an identity transform

            mov_width = serverOverlayMark.getServer().getWidth()
            mov_height = serverOverlayMark.getServer().getHeight()
            affine.prependRotation(mov_rotation, mov_width / 2, mov_height / 2)

            for (int i = 0; i < iterations; i++) {
                // Perform the alignment: if annotations are present on either image, align on their area; otherwise use intensities
                print("AutoAlignPixelSize: " + AutoAlignPixelSize)
                if (serverBaseMark.hierarchy.nObjects() > 0 || serverOverlayMark.hierarchy.nObjects() > 0)
                    autoAlignPrep(AutoAlignPixelSize, "AREA", serverBaseMark, serverOverlayMark, affine, registrationType, use_single_channel)
                else
                    autoAlignPrep(AutoAlignPixelSize, "notAREA", serverBaseMark, serverOverlayMark, affine, registrationType, use_single_channel)
                AutoAlignPixelSize /= decrement_factor
                if (AutoAlignPixelSize < 1) {
                    AutoAlignPixelSize = 1
                }
            }

            def matrix = []
            matrix << affine.getMxx()
            matrix << affine.getMxy()
            matrix << affine.getTx()
            matrix << affine.getMyx()
            matrix << affine.getMyy()
            matrix << affine.getTy()

            new File(path).withObjectOutputStream {
                it.writeObject(matrix)
            }
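            // The serialized list is [mxx, mxy, tx, myx, myy, ty], i.e. the row-major 2x3 affine
            //   [ mxx  mxy  tx ]
            //   [ myx  myy  ty ]
            // applied as x' = mxx*x + mxy*y + tx, y' = myx*x + myy*y + ty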
        }
    }
}

if (missingList.isEmpty()) {
    print 'Done!'
} else {
    missingList = missingList.unique()
    print 'Done! Missing slides: ' + missingList
}


/* Subfunctions taken from:
https://github.com/qupath/qupath/blob/a1465014c458d510336993802efb08f440b50cc1/qupath-experimental/src/main/java/qupath/lib/gui/align/ImageAlignmentPane.java
 */

//creates an image server using the actual images (for intensity-based alignment) or a labeled image server (for annotation-based)
double autoAlignPrep(double requestedPixelSizeMicrons, String alignmentMethod, ImageData<BufferedImage> imageDataBase, ImageData<BufferedImage> imageDataSelected, Affine affine, String registrationType, int use_single_channel) throws IOException {
    ImageServer<BufferedImage> serverBase, serverSelected

    if (alignmentMethod == 'AREA') {
        logger.debug("Image alignment using area annotations")
        // Build a shared label map so that matching classes receive the same label in both images
        Map<PathClass, Integer> labels = new LinkedHashMap<>()
        int label = 1
        labels.put(PathClassFactory.getPathClassUnclassified(), label++)
        for (def annotation : imageDataBase.getHierarchy().getAnnotationObjects()) {
            def pathClass = annotation.getPathClass()
            if (pathClass != null && !labels.containsKey(pathClass))
                labels.put(pathClass, label++)
        }
        for (def annotation : imageDataSelected.getHierarchy().getAnnotationObjects()) {
            def pathClass = annotation.getPathClass()
            if (pathClass != null && !labels.containsKey(pathClass))
                labels.put(pathClass, label++)
        }

        double downsampleBase = requestedPixelSizeMicrons / imageDataBase.getServer().getPixelCalibration().getAveragedPixelSize().doubleValue()
        serverBase = new LabeledImageServer.Builder(imageDataBase)
                .backgroundLabel(0)
                .addLabels(labels)
                .downsample(downsampleBase)
                .build()

        double downsampleSelected = requestedPixelSizeMicrons / imageDataSelected.getServer().getPixelCalibration().getAveragedPixelSize().doubleValue()
        serverSelected = new LabeledImageServer.Builder(imageDataSelected)
                .backgroundLabel(0)
                .addLabels(labels)
                .downsample(downsampleSelected)
                .build()
        // Disable single-channel alignment when working with area annotations; its behavior with labeled images is untested
        use_single_channel = 0
    } else {
        // Default - just use intensities
        logger.debug("Image alignment using intensities")
        serverBase = imageDataBase.getServer()
        serverSelected = imageDataSelected.getServer()
    }

    scaleFactor = autoAlign(serverBase, serverSelected, registrationType, affine, requestedPixelSizeMicrons, use_single_channel)
    return scaleFactor
}
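
// Example of the downsample arithmetic used above and in autoAlign(): with
// requestedPixelSizeMicrons = 30 and a 0.5 µm/px scan, images are requested at
// downsample 30 / 0.5 = 60 before the transform is estimated.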

double autoAlign(ImageServer<BufferedImage> serverBase, ImageServer<BufferedImage> serverOverlay, String registrationType, Affine affine, double requestedPixelSizeMicrons, use_single_channel) {
    PixelCalibration calBase = serverBase.getPixelCalibration()
    double pixelSizeBase = calBase.getAveragedPixelSizeMicrons()
    if (!Double.isFinite(pixelSizeBase)) {
        // Pixel size is unavailable - assume 50 µm/px so a usable downsample can still be computed
        pixelSizeBase = 50
    }
    double downsampleBase = requestedPixelSizeMicrons / pixelSizeBase

    PixelCalibration calOverlay = serverOverlay.getPixelCalibration()
    double pixelSizeOverlay = calOverlay.getAveragedPixelSizeMicrons()
    if (!Double.isFinite(pixelSizeOverlay)) {
        // Pixel size is unavailable - assume 50 µm/px so a usable downsample can still be computed
        pixelSizeOverlay = 50
    }
    double downsampleOverlay = requestedPixelSizeMicrons / pixelSizeOverlay

    double scaleFactor = downsampleBase / downsampleOverlay

    BufferedImage imgBase = serverBase.readBufferedImage(RegionRequest.createInstance(serverBase.getPath(), downsampleBase, 0, 0, serverBase.getWidth(), serverBase.getHeight()))
    BufferedImage imgOverlay = serverOverlay.readBufferedImage(RegionRequest.createInstance(serverOverlay.getPath(), downsampleOverlay, 0, 0, serverOverlay.getWidth(), serverOverlay.getHeight()))

    // Determine whether to calculate the intensity-based alignment using all channels or a single channel
    Mat matBase
    Mat matOverlay
    if (use_single_channel == 0) {
        // Use all channels, flattened to grayscale
        imgBase = ensureGrayScale(imgBase)
        imgOverlay = ensureGrayScale(imgOverlay)
        matBase = OpenCVTools.imageToMat(imgBase)
        matOverlay = OpenCVTools.imageToMat(imgOverlay)
    } else {
        matBase = OpenCVTools.imageToMat(imgBase)
        matOverlay = OpenCVTools.imageToMat(imgOverlay)
        int channel = use_single_channel - 1 // 1-based setting, 0-based index
        matBase = OpenCVTools.splitChannels(matBase)[channel]
        matOverlay = OpenCVTools.splitChannels(matOverlay)[channel]
        //use this to preview how the channel looks
        //OpenCVTools.matToImagePlus('Channel:' + channel.toString(), matBase).show()
    }

    Mat matTransform = Mat.eye(2, 3, opencv_core.CV_32F).asMat()
    // Initialize the warp matrix from the existing transform (e.g. the rotation prepended earlier)
    try {
        FloatIndexer indexer = matTransform.createIndexer()
        indexer.put(0, 0, (float)affine.getMxx())
        indexer.put(0, 1, (float)affine.getMxy())
        indexer.put(0, 2, (float)(affine.getTx() / downsampleBase))
        indexer.put(1, 0, (float)affine.getMyx())
        indexer.put(1, 1, (float)affine.getMyy())
        indexer.put(1, 2, (float)(affine.getTy() / downsampleBase))
    } catch (Exception e) {
        logger.error("Error initializing the transform matrix", e)
    }
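
    // The ECC transform is estimated on downsampled pixels: full-resolution translations are divided
    // by downsampleBase going in (above) and multiplied by downsampleBase coming out (below), while
    // the linear terms (mxx, mxy, myx, myy) are scale-free and pass through unchanged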

    TermCriteria termCrit = new TermCriteria(TermCriteria.COUNT, 100, 0.0001)

    try {
        int motion
        switch (registrationType) {
            case "AFFINE":
                motion = opencv_video.MOTION_AFFINE
                break
            case "RIGID":
                motion = opencv_video.MOTION_EUCLIDEAN
                break
            default:
                logger.warn("Unknown registration type {} - will use AFFINE", registrationType)
                motion = opencv_video.MOTION_AFFINE
                break
        }
        // findTransformECC returns the final enhanced correlation coefficient (closer to 1 means a better fit)
        double result = opencv_video.findTransformECC(matBase, matOverlay, matTransform, motion, termCrit, null)
        logger.info("Transformation result: {}", result)
    } catch (Exception e) {
        Dialogs.showErrorNotification("Estimate transform", "Unable to estimate transform - result did not converge")
        logger.error("Unable to estimate transform", e)
        return
    }

// To use the following function, images need to be the same size
//      def matTransform = opencv_video.estimateRigidTransform(matBase, matOverlay, false);
    Indexer indexer = matTransform.createIndexer()
    affine.setToTransform(
            indexer.getDouble(0, 0),
            indexer.getDouble(0, 1),
            indexer.getDouble(0, 2) * downsampleBase,
            indexer.getDouble(1, 0),
            indexer.getDouble(1, 1),
            indexer.getDouble(1, 2) * downsampleBase
    )
    indexer.release()

    matBase.release()
    matOverlay.release()
    matTransform.release()

    return scaleFactor
}
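
// Note: scaleFactor = downsampleBase / downsampleOverlay, which reduces to pixelSizeOverlay / pixelSizeBase -
// the relative pixel-size difference between the two images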

//to gather annotation objects instead of detections, change getDetectionObjects() to getAnnotationObjects() below
def GatherObjects(boolean deleteExisting, boolean createInverse, File f) {
    f.withObjectInputStream {
        matrix = it.readObject()

        // Get the project & the requested image name
        def project = getProject()
        def entry = project.getImageList().find { it.getImageName() + ".aff" == f.getName() }
        if (entry == null) {
            print 'Could not find image with name ' + f.getName()
            return
        }

        def otherHierarchy = entry.readHierarchy()
        def pathObjects = otherHierarchy.getDetectionObjects() //OR getAnnotationObjects()

        // Define the transformation matrix; AffineTransform's constructor takes (m00, m10, m01, m11, m02, m12),
        // hence the reordering of the serialized [mxx, mxy, tx, myx, myy, ty] list
        def transform = new AffineTransform(
                matrix[0], matrix[3], matrix[1],
                matrix[4], matrix[2], matrix[5]
        )
        if (createInverse)
            transform = transform.createInverse()

        if (deleteExisting)
            clearAllObjects()

        def newObjects = []
        for (pathObject in pathObjects) {
            newObjects << transformObject(pathObject, transform)
        }
        addObjects(newObjects)
    }
}
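
// Hypothetical usage sketch (GatherObjects is not called by this script): run from the image that
// should receive the objects, with the transform saved under the name '<imageName>.aff' so the
// lookup above can match it:
//     def f = new File(buildFilePath(PROJECT_BASE_DIR, 'Affine', getProjectEntry().getImageName() + '.aff'))
//     GatherObjects(false, true, f)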

//other subfunctions

PathObject transformObject(PathObject pathObject, AffineTransform transform) {
    // Create a new object with the converted ROI
    def roi = pathObject.getROI()
    def roi2 = transformROI(roi, transform)
    def newObject = null
    if (pathObject instanceof PathCellObject) {
        def nucleusROI = pathObject.getNucleusROI()
        if (nucleusROI == null)
            newObject = PathObjects.createCellObject(roi2, pathObject.getPathClass(), pathObject.getMeasurementList())
        else
            newObject = PathObjects.createCellObject(roi2, transformROI(nucleusROI, transform), pathObject.getPathClass(), pathObject.getMeasurementList())
    } else if (pathObject instanceof PathTileObject) {
        newObject = PathObjects.createTileObject(roi2, pathObject.getPathClass(), pathObject.getMeasurementList())
    } else if (pathObject instanceof PathDetectionObject) {
        newObject = PathObjects.createDetectionObject(roi2, pathObject.getPathClass(), pathObject.getMeasurementList())
        newObject.setName(pathObject.getName())
    } else {
        newObject = PathObjects.createAnnotationObject(roi2, pathObject.getPathClass(), pathObject.getMeasurementList())
        newObject.setName(pathObject.getName())
    }
    // Handle child objects recursively
    if (pathObject.hasChildren()) {
        newObject.addPathObjects(pathObject.getChildObjects().collect({ transformObject(it, transform) }))
    }
    return newObject
}

ROI transformROI(ROI roi, AffineTransform transform) {
    def shape = RoiTools.getShape(roi) // Should be able to use roi.getShape() - but there's currently a bug in it for rectangles/ellipses!
    shape2 = transform.createTransformedShape(shape)
    return RoiTools.getShapeROI(shape2, roi.getImagePlane(), 0.5)
}

static BufferedImage ensureGrayScale(BufferedImage img) {
    if (img.getType() == BufferedImage.TYPE_BYTE_GRAY)
        return img
    if (img.getType() == BufferedImage.TYPE_BYTE_INDEXED) {
        // Reinterpret the indexed raster as 8-bit grayscale without copying pixels
        ColorSpace cs = ColorSpace.getInstance(ColorSpace.CS_GRAY)
        def colorModel = new ComponentColorModel(cs, [8] as int[], false, true,
                Transparency.OPAQUE,
                DataBuffer.TYPE_BYTE)
        return new BufferedImage(colorModel, img.getRaster(), false, null)
    }
    // Otherwise redraw the image into an 8-bit grayscale buffer
    BufferedImage imgGray = new BufferedImage(img.getWidth(), img.getHeight(), BufferedImage.TYPE_BYTE_GRAY)
    Graphics2D g2d = imgGray.createGraphics()
    g2d.drawImage(img, 0, 0, null)
    g2d.dispose()
    return imgGray
}