/**
 * See https://github.com/MarkZaidi/QuPath-Image-Alignment/blob/main/Apply-Transforms.groovy for the most up-to-date version.
 * Please link to the GitHub repo when referencing code in forums, as the code will be continually updated.
 *
 * Script is largely adapted from Pete Bankhead's script:
 * https://gist.github.com/petebankhead/db3a3c199546cadc49a6c73c2da14d6c#file-qupath-concatenate-channels-groovy
 * with parts from Yae Mun Lim for file-name matching.
 *
 * The script reads in a set of affine transformations generated by 'Calculate-Transforms.groovy' and applies them to
 * images in the project. The result is a multichannel image containing the reference image, with all aligned images
 * appended to it. Optionally, stain separation can be performed on brightfield images, although it is recommended that
 * you compute and set the color vectors as outlined in https://qupath.readthedocs.io/en/latest/docs/tutorials/separating_stains.html
 *
 * Usage:
 * - Run 'Calculate-Transforms.groovy' to generate the transform (tform) matrices required.
 * - Set 'refStain' to the same reference stain as used in 'Calculate-Transforms.groovy'.
 * - Run this script only on images whose names contain 'refStain'.
 * Bugs:
 * - If one image name is contained within another image name in the project (e.g. a project containing the image names
 *   'sample_stainx.tiff' and 'sample-1_stainx.tiff'), file matching may get confused and treat them as part of the same
 *   set. If that is not intentional, make sure the SlideID of one set is not a substring of another set's SlideID.
 * - If one or more of the images has been rotated on import, QuPath will throw errors while writing the ome.tiffs (but
 *   will still finish writing them). The solution is either to make sure no images are rotated on import (rely on manual
 *   alignment) or to run 'read_and_rewrite_ometiff.py' on the folder containing the written images (reading and
 *   rewriting seems to fix it). If neither is done, the image cannot be loaded into QuPath (it works fine in Python, MATLAB, etc.).
 */
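// Illustration only (not part of the original script): the file matching below assumes image names of the form
// '<SlideID>_<Stain>.<ext>', with one serialized transform per moving image in the project's 'Affine' folder.
// For a made-up file name such as 'slide01_CD8.tiff', the parsing used later in this script behaves roughly like:
//
//   def (imageName, ext) = 'slide01_CD8.tiff'.split('\\.')   // imageName == 'slide01_CD8', ext == 'tiff'
//   def (slideID, stain) = imageName.split('_')              // slideID == 'slide01', stain == 'CD8'
//   def refName = slideID + '_' + refStain + '.' + ext       // e.g. 'slide01_PTEN.tiff' when refStain == 'PTEN'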
/** Comments from Pete's script:
 * Merge images along the channels dimension in QuPath v0.2.0.
 *
 * This shows how multiple images can be combined by channel concatenation,
 * optionally applying color deconvolution or affine transformations along the way.
 * It may be applied to either brightfield images (with stains set) or fluorescence images.
 *
 * The result can be written to a file (if 'pathOutput' is defined) or opened in the QuPath viewer.
 *
 * Writing to a file is *strongly recommended* to ensure the result is preserved.
 * Opening in the viewer directly will have quite slow performance (as the transforms are applied dynamically)
 * and there is no guarantee the image can be reopened later, since the representation of the
 * transforms might change in future versions... so this is really only to preview results.
 *
 * Note QuPath does *not* offer full whole slide image registration - and there are no
 * plans to change this. If you require image registration, you probably need to use other
 * software to achieve this, and perhaps then import the registered images into QuPath later.
 *
 * Rather, this script is limited to applying a pre-defined affine transformation to align two or more
 * images. In the case where image registration has already been applied, it can be used to
 * concatenate images along the channel dimension without any additional transformation.
 *
 * In its current form, the script assumes you have an open project containing the images
 * OS-2.ndpi and OS-3.ndpi from the OpenSlide freely-distributable test data,
 * and that the image type (and color deconvolution stains) have been set.
 * The script will apply a pre-defined affine transform to align the images (*very* roughly!),
 * and write their deconvolved channels together as a single 6-channel pseudo-fluorescence image.
 *
 * You will need to change the image names & add the correct transforms to apply it elsewhere.
 *
 * USE WITH CAUTION!
 * This uses still-in-development parts of QuPath that are not officially documented,
 * and may change or be removed in future versions.
 *
 * Made available due to frequency of questions, not readiness of code.
 *
 * For these reasons, I ask that you refrain from posting the script elsewhere, and instead link to this
 * Gist so that anyone requiring it can get the latest version.
 *
 * @author Pete Bankhead
 */


import javafx.application.Platform
import javafx.scene.transform.Affine
import qupath.lib.images.ImageData
import qupath.lib.images.servers.ImageChannel
import qupath.lib.images.servers.ImageServer
import qupath.lib.images.servers.ImageServers

import java.awt.image.BufferedImage
import java.util.stream.Collectors

import qupath.lib.images.servers.TransformedServerBuilder
import java.awt.geom.AffineTransform

import static qupath.lib.gui.scripting.QPEx.*
def currentImageName = getProjectEntry().getImageName()
// Variables to set
//////////////////////////////////////////////////////////////

def deleteExisting = false // SET ME! Delete existing objects
def createInverse = true // SET ME! Change this if things end up in the wrong place
def performDeconvolution = false // If a brightfield image, separate channels into individual stains (remember to set them in the original image)
String refStain = "PTEN" // Specify the reference stain; should be the same as in 'Calculate-Transforms.groovy'
// Define an output path where the merged file should be written
// Recommended to use extension .ome.tif (required for a pyramidal image)
// If null, the image will be opened in a viewer
// (see the example sketch just after this settings block for writing to a project subfolder)
String pathOutput = null
//pathOutput = buildFilePath(PROJECT_BASE_DIR, currentImageName + '.ome.tif')
double outputDownsample = 1 // Choose how much to downsample the output (can be *very* slow to export large images with downsample 1!)

//////////////////////////////////////////////////////////////
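// Example sketch (not part of the original script): to write each merged image into a dedicated project
// subfolder rather than opening it in the viewer, something like the lines below could be used.
// The folder name 'Aligned' is purely illustrative.
//mkdirs(buildFilePath(PROJECT_BASE_DIR, 'Aligned'))
//pathOutput = buildFilePath(PROJECT_BASE_DIR, 'Aligned', currentImageName + '.ome.tif')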

// Affine folder path
path = buildFilePath(PROJECT_BASE_DIR, 'Affine')

// Get a list of all images in the project
def projectImageList = getProject().getImageList()

def list_of_moving_image_names = []
def list_of_transforms = []
def list_of_reference_image_names = []
// Read the serialized transforms and obtain file names from the Affine folder
new File(path).eachFile{ f->
    f.withObjectInputStream {
        matrix = it.readObject()

        def targetFileName = f.getName()
        list_of_moving_image_names << targetFileName
        def (targetImageName, imageExt) = targetFileName.split('\\.')
        def (slideID, targetStain) = targetImageName.split('_')

        def targetImage = projectImageList.find {it.getImageName() == targetFileName}
        if (targetImage == null) {
            print 'Could not find image with name ' + f.getName()
            return
        }

        def targetImageData = targetImage.readImageData()
        def targetHierarchy = targetImageData.getHierarchy()

        refFileName = slideID + "_" + refStain + "." + imageExt
        list_of_reference_image_names << refFileName

        def refImage = projectImageList.find {it.getImageName() == refFileName}
        def refImageData = refImage.readImageData()
        def refHierarchy = refImageData.getHierarchy()

        def pathObjects = refHierarchy.getAnnotationObjects()

        // Define the transformation matrix: the serialized values are reordered into the
        // (m00, m10, m01, m11, m02, m12) argument order expected by the AffineTransform constructor
        def transform = new AffineTransform(
                matrix[0], matrix[3], matrix[1],
                matrix[4], matrix[2], matrix[5]
        )
        if (createInverse)
            transform = transform.createInverse()

        if (deleteExisting)
            targetHierarchy.clearAll()
        list_of_transforms << transform

//        def newObjects = []
//        for (pathObject in pathObjects) {
//            newObjects << transformObject(pathObject, transform)
//        }
        //targetHierarchy.addPathObjects(newObjects)
        //targetImage.saveImageData(targetImageData)
    }
}
list_of_reference_image_names = list_of_reference_image_names.unique()

// Create a LinkedHashMap from the list of moving image names and their corresponding transforms
all_moving_file_map = [list_of_moving_image_names, list_of_transforms].transpose().collectEntries{ [it[0], it[1]] }
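// Illustration only (made-up names): at this point all_moving_file_map holds one entry per transform file,
// keyed by moving image name, e.g. ['slide01_CD8.tiff': <AffineTransform>, 'slide01_Ki67.tiff': <AffineTransform>].
// To sanity-check which transforms were picked up, the map could be printed before filtering:
//all_moving_file_map.each { name, tform -> print(name + ' -> ' + tform) }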

// NOTE: ONLY RUN THIS SCRIPT ON REFERENCE IMAGES.
print("Current image name: " + currentImageName);
if (!currentImageName.contains(refStain)) {
    //print 'WARNING: non-reference image name detected. Only run script on reference images'
    throw new Exception("WARNING: non-reference image name detected: " + currentImageName + ". Only run script on reference images")
}
currentRefSlideName = currentImageName.split('_')[0]
print 'Processing: ' + currentRefSlideName
// Only keep entries whose transforms pertain to images sharing the same SlideID, and exclude any that contain
// refStain (there shouldn't be any with refStain, as the reference image is assigned an identity matrix below; however,
// running Calculate-Transforms.groovy with different refStains set can generate them and override the identity matrix set below)
//print 'all_moving_file_map: ' + all_moving_file_map
filteredMap = all_moving_file_map.findAll {it.key.contains(currentRefSlideName) && !it.key.contains(refStain)}
//print 'filteredMap: ' + filteredMap

// The reference image itself gets an identity transform
def reference_transform_map = [
        (currentImageName) : new AffineTransform()
]

transforms = reference_transform_map + filteredMap


// Loop through the transforms to create a server that merges these
def project = getProject()
def servers = []
def channels = []
int c = 0
for (def mapEntry : transforms.entrySet()) {
    print 'mapEntry: ' + mapEntry

    // Find the next image & transform
    def name = mapEntry.getKey()
    print(name)
    def transform = mapEntry.getValue()
    if (transform == null)
        transform = new AffineTransform()
    def entry = project.getImageList().find {it.getImageName() == name}

    // Read the image & check if it has stains (for deconvolution)
    def imageData = entry.readImageData()
    def currentServer = imageData.getServer()
    def stains = imageData.getColorDeconvolutionStains()
    print(stains)

    // Nothing more to do if we have the identity transform & no stains
    if (transform.isIdentity() && stains == null) {
        channels.addAll(updateChannelNames(name, currentServer.getMetadata().getChannels()))
        servers << currentServer
        continue
    } else {
        // Create a server to apply transforms
        def builder = new TransformedServerBuilder(currentServer)
        if (!transform.isIdentity())
            builder.transform(transform)
        // If we have stains, deconvolve them
        if (!performDeconvolution)
            stains = null // Mark's way of disabling stain deconvolution if a brightfield image is present
        if (stains != null) {
            builder.deconvolveStains(stains)
            for (int i = 1; i <= 3; i++)
                channels << ImageChannel.getInstance(name + "-" + stains.getStain(i).getName(), ImageChannel.getDefaultChannelColor(c++))
        } else {
            channels.addAll(updateChannelNames(name, currentServer.getMetadata().getChannels()))
        }

        //Mark modification: in addition to writing out deconvolved channels, include the original RGB channels for viewing purposes
        //Currently unsupported due to bugs when used in conjunction with fluorescent images, so the 2 lines below are left commented out
        //channels.addAll(updateChannelNames(name, currentServer.getMetadata().getChannels()))
        //servers << currentServer

        servers << builder.build()
    }
}

println 'Channels: ' + channels.size()

// Remove the first server - we need to use it as a basis (defining key metadata, size)
ImageServer<BufferedImage> server = servers.remove(0)
// If anything else remains, concatenate along the channels dimension
if (!servers.isEmpty())
    server = new TransformedServerBuilder(server)
            .concatChannels(servers)
            .build()

// Write the image or open it in the viewer
if (pathOutput != null) {
    if (outputDownsample > 1)
        server = ImageServers.pyramidalize(server, outputDownsample)
    writeImage(server, pathOutput)
} else {
    // Create the new image & add to the project
    def imageData = new ImageData<BufferedImage>(server)
    setChannels(imageData, channels as ImageChannel[])
    Platform.runLater {
        getCurrentViewer().setImageData(imageData)
    }
}

// Prepend a base name to channel names
List<ImageChannel> updateChannelNames(String name, Collection<ImageChannel> channels) {
    return channels
            .stream()
            .map( c -> {
                return ImageChannel.getInstance(name + '-' + c.getName(), c.getColor())
                }
            ).collect(Collectors.toList())
}
print('Done')