-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathevaluate.py
executable file
·1836 lines (1594 loc) · 95 KB
/
evaluate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# necessary imports
import os
import glob
import argparse
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import cm
from matplotlib.pyplot import get_cmap
import seaborn as sns
import numpy as np
import pandas as pd
import torch
import scipy.interpolate as si
import gc
from torchvision.utils import flow_to_image
from torchvision.transforms.functional import gaussian_blur
from src.losses import HierarchicalReconstructionLoss, HierarchicalRegularization, L2_loss, Soft_dice_loss, NCC_loss, jacobian_det,JDetStd
from src.data.BraTS import brats
from src.data.OASIS import oasis
from src.models import PULPo
# Select the PyTorch backends for neurite/voxelmorph BEFORE those packages are imported.
os.environ['NEURITE_BACKEND'] = 'pytorch'
# TODO: import vxm? (load_vxm() below relies on a `vxm` name that is never imported here)
os.environ['VXM_BACKEND'] = 'pytorch'
# seeding for reproducibility
torch.manual_seed(0)
np.random.seed(0)
class Evaluate():
    def __init__(self):
        """Set up default evaluation state.

        Most attributes start as Ellipsis placeholders and are populated later:
        model-related fields by load_model()/load_vxm(), data- and
        metric-related fields by load_data().
        """
        # glob pattern (relative to a model directory) locating the best checkpoint
        self.checkpoint_folder = "checkpoints/best-reconstruction*.ckpt"
        # run on the first GPU when available, otherwise fall back to the CPU
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        #####################################################################################################
        ####### LOAD DATA #####################################################################
        #####################################################################################################
        self.num_inputs = ...       # number of training datapoints; set in load_data()
        self.loaders = ...          # list of data loaders; set in load_data()
        self.loader_names = ...     # names matching self.loaders; set in load_data()
        self.segs = False           # whether loaders provide segmentations
        self.lms = False            # whether loaders provide landmarks
        self.mask = False           # whether loaders provide masks
        self.grid_size = 20         # control-point grid resolution for DF visualization
        #####################################################################################################
        ####### SELECT METRICS ################################################################
        #####################################################################################################
        self.metric_names = ...     # set in load_data()
        self.metrics = ...          # set in load_data()
        self.num_datasets, self.num_metrics = (..., ...)
        self.model = None           # current model; set by load_model()/load_vxm()
        self.latent_levels = ...    # number of PULPo latent levels; set by load_model()
        self.models = ...
        self.num_models = ...
        self.model_names = ...
        self.output_dir = ...       # evaluation output directory; set by load_model()
#####################################################################################################
####### HELPERS FOR DATA LOADING AND MODEL PASSES ################################################
#####################################################################################################
def build_path(self, model_dir,name):
""" Builds the path to the checkpoint file."""
filepath = model_dir+"/"+name+"/"+self.checkpoint_folder
checkpoint = glob.glob(filepath)[0]
return checkpoint
    def load_model(self, model_dir, git_hash, version):
        """Load the PULPo model from a given directory. git_hash and version are
        required as this is how the trained models are saved.

        Also sets self.output_dir (creating it if needed), self.model and
        self.latent_levels as side effects.

        Args:
            model_dir (str): root directory of the trained models.
            git_hash (str): git hash subdirectory of the training run.
            version (str): version subdirectory of the training run.

        Returns:
            The loaded PULPo model, in eval mode, on self.device.
        """
        name = git_hash+"/"+version
        checkpoint = self.build_path(model_dir, name)
        # evaluation artifacts (tables/plots) are written next to the checkpoint
        self.output_dir = model_dir + "/" + name + "/" + "evaluation"
        os.makedirs(self.output_dir, exist_ok=True)
        model = PULPo.load_from_checkpoint(checkpoint).to(self.device)
        # inference mode: disables dropout / batch-norm updates
        model.eval()
        self.model = model
        self.latent_levels = model.latent_levels
        return model
def load_vxm(self,model_dir,name):
""" Loads the DIF-VM (vxm) baseline model from a given directory."""
path = model_dir+"/"+name+".pt"
self.model = vxm.networks.VxmDense.load(path, self.device)
self.model.to(self.device)
return self.model
def load_data(self, task, segs, lms, mask, ndims):
""" Loads the data loaders for the given task."""
self.segs = segs
self.lms = lms
self.mask = mask
if task == "oasis":
self.task = "oasis"
(
self.train_loader,
self.validation_loader,
self.test_loader_seg,
self.test_loader_lm,
) = oasis.create_data_loaders(batch_size=1, segs=segs, lms=lms, mask=mask, ndims=ndims)
self.loaders = [self.train_loader,self.validation_loader,self.test_loader_seg,self.test_loader_lm]
self.loader_names = ["train","val","test_seg","test_lm"]
elif task == "brats":
self.task = "brats"
(
self.train_loader,
self.validation_loader,
self.test_loader,
) = brats.create_data_loaders(batch_size=1, segs=segs, lms=lms, mask=mask, ndims=ndims)
self.loaders = [self.train_loader,self.validation_loader,self.test_loader]
self.loader_names = ["train","val","test"]
else:
raise Exception(f"Task {task} does not exist." )
# metrics
self.metrics = [L2_loss, JDetStd, jacobian_det]
self.metric_names = ["RMSE", "JDetStd", "JDetLeq0"]
if self.segs:
self.metrics += [Soft_dice_loss]
self.metric_names += ["Dice"]
if self.lms:
self.metrics += [self.lm_mae, self.lm_euclid]
self.metric_names += ["LM_MAE", "LM_Euclid"]
self.num_datasets, self.num_metrics = len(self.loaders), len(self.metrics)
self.num_inputs = self.train_loader.dataset.__len__()
return
def sample_data(self, loader_name, index=0):
""" Samples one datapoint from the given loader. """
# check which element in self.loader_names corresponds to loader_name
loader_idx = self.loader_names.index(loader_name)
loader = self.loaders[loader_idx]
if index >= len(loader):
raise ValueError(f"Index {index} is out of range for loader {loader_name}.")
if index != 0:
print(f"Warning: You are not using the first element of the loader {loader_name}.")
for i, input in enumerate(loader):
if i == index:
break
else:
input = next(iter(loader))
x,y,seg1,seg2,lm1,lm2,mask_x,mask_y = input
return [x.to(self.device), y.to(self.device), seg1.to(self.device), seg2.to(self.device), lm1.to(self.device), lm2.to(self.device), mask_x.to(self.device), mask_y.to(self.device), loader_name]
def predict(self, inputs, num_samples=20, deterministic=False):
""" Generates a prediction given the inputs with the current model."""
with torch.no_grad():
model = self.model
x,y,seg_x,seg_y,lm_x,lm_y,mask_x,mask_y,loader = inputs
if deterministic and num_samples != 1:
raise Exception("Deterministic predictions with more than 1 sample make no sense!")
if num_samples==1:
if deterministic:
outputs, individual_dfs = model.predict_deterministic(x,y)
prediction_name = "deterministic_prediction"
else:
outputs, individual_dfs = model.predict(x,y, N=num_samples)
prediction_name = "sample_prediction"
combined_dfs, final_dfs = model.combine_dfs(individual_dfs)
if self.segs:
warped_seg = {key:model.autoencoder.decoders[key].spatial_transform(final_dfs[key], seg_x) for key in final_dfs}
else:
warped_seg = {}
warped_seg[0] = torch.empty((0,), dtype=torch.float32)
return ([outputs[0], final_dfs[0], warped_seg[0], outputs, individual_dfs, combined_dfs,final_dfs, warped_seg, prediction_name],[])
else: # num_samples > 1
prediction_name = f"avg_prediction_over_{str(num_samples)}_samples"
if self.model.ndims == 3:
# we don't use segmentations for 3D uncertainty due to memory constraints
warped_seg = {}
warped_seg[0] = torch.empty((0,), dtype=torch.float32)
all_warped_seg = warped_seg
# we can't batch 3D predictions, so we have to predict iteratively
latent_levels = model.latent_levels
level_sizes = {0:[dim for dim in model.input_size]}
for k in range(model.total_levels-1):
curr = torch.ceil(torch.tensor(level_sizes[k])/2)
level_sizes[k+1] = [int(curr[i].item()) for i in range(len(model.input_size))]
# the outputs and final df are on the highest resolution on level 0
all_outputs = {key: torch.zeros((num_samples,1,*level_sizes[0]),device=self.device) if key==0 else
torch.zeros((num_samples,1,*level_sizes[key+model.lk_offset]),device=self.device) for key in range(latent_levels)}
all_final_dfs = {key: torch.zeros((num_samples,3,*level_sizes[0]),device=self.device) if key==0 else
torch.zeros((num_samples,3,*level_sizes[key+model.lk_offset]),device=self.device) for key in range(latent_levels)}
# the individual and combined df are not necessarily on the highest resolution on level 0
all_individual_dfs = {key: torch.zeros((num_samples,3,*level_sizes[key+model.lk_offset]),device=self.device) for key in range(latent_levels)}
all_combined_dfs = {key: torch.zeros((num_samples,3,*level_sizes[key+model.lk_offset]),device=self.device) for key in range(latent_levels)}
for i in range(num_samples):
outputs, individual_dfs = model.predict(x,y, N=1)
combined_dfs, final_dfs = model.combine_dfs(individual_dfs)
for key in range(latent_levels):
all_outputs[key][i] = outputs[key][0]
all_individual_dfs[key][i] = individual_dfs[key][0]
all_combined_dfs[key][i] = combined_dfs[key][0]
all_final_dfs[key][i] = final_dfs[key][0]
# calculate the averages
individual_dfs = {key:individual_dfs[key].mean(dim=0).unsqueeze(0) for key in all_individual_dfs}
combined_dfs, final_dfs = model.combine_dfs(individual_dfs)
outputs = {key:model.autoencoder.decoders[key].spatial_transform(final_dfs[key], x) for key in final_dfs}
# calculate the stds
output_std = {key:torch.mean(torch.std(all_outputs[key], axis=0),axis=0) for key in all_outputs}
if self.mask:
# mask the dfs for std calculation. The mask is first pooled to the level sizes and then warped.
warped_mask = {key:model.autoencoder.decoders[key].spatial_transform(final_dfs[key], mask_x) for key in final_dfs}
individual_df_std = {key:torch.mean(torch.std(all_individual_dfs[key], axis=0),axis=0) for key in all_outputs}
final_df_std = {key:torch.mean(torch.std(all_final_dfs[key]*model.autoencoder.decoders[key].spatial_transform(final_dfs[key], mask_x), axis=0),axis=0) for key in all_outputs}
else:
individual_df_std = {key:torch.mean(torch.std(all_individual_dfs[key], axis=0),axis=0) for key in all_outputs}
final_df_std = {key:torch.mean(torch.std(all_final_dfs[key], axis=0),axis=0) for key in all_outputs}
else: # ndims == 2
outputs, individual_dfs = model.predict_output_samples(x,y, N=num_samples)
if self.segs:
seg_xn = torch.vstack([seg_x for _ in range(num_samples)])
else:
warped_seg = {}
warped_seg[0] = torch.empty((0,), dtype=torch.float32)
all_warped_seg = warped_seg
# save these for later
all_outputs = {key:outputs[key].squeeze(0) for key in outputs}
all_individual_dfs = {key:individual_dfs[key].squeeze(0) for key in individual_dfs}
individual_dfs = {key:individual_dfs[key].mean(dim=1) for key in individual_dfs}
combined_dfs, final_dfs = model.combine_dfs(individual_dfs)
all_combined_dfs, all_final_dfs = model.combine_dfs(all_individual_dfs)
if self.segs:
warped_seg = {key:model.autoencoder.decoders[key].spatial_transform(final_dfs[key], seg_x) for key in final_dfs}
all_warped_seg = {key:model.autoencoder.decoders[key].spatial_transform(all_final_dfs[key], seg_xn) for key in all_final_dfs}
outputs = {key:model.autoencoder.decoders[key].spatial_transform(final_dfs[key], x) for key in final_dfs}
# calculate the stds
output_std = {key:torch.mean(torch.std(all_outputs[key], axis=0),axis=0) for key in all_outputs}
individual_df_std = {key:torch.mean(torch.std(all_individual_dfs[key], axis=0),axis=0) for key in all_outputs}
final_df_std = {key:torch.mean(torch.std(all_final_dfs[key], axis=0),axis=0) for key in all_outputs}
return ([outputs[0], final_dfs[0], warped_seg[0], outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name],
[output_std, individual_df_std, final_df_std, all_outputs, all_individual_dfs, all_combined_dfs, all_final_dfs, all_warped_seg])
def predict_vxm(self, moving, fixed, num_samples=20, deterministic=False):
""" Generates a prediction given the inputs with the current DIF-VM baseline model."""
if deterministic and num_samples != 1:
raise Exception("Deterministic predictions can only be made for 1 sample.")
if deterministic and num_samples == 1:
prediction_name = "deterministic_prediction"
else:
prediction_name = f"avg_prediction_over_{str(num_samples)}_samples"
with torch.no_grad():
inshape = moving.shape[2:]
all_moved = torch.zeros((num_samples,1,*inshape), device=moving.device)
all_warp = torch.zeros((num_samples,3,*inshape), device=moving.device)
for i in range(num_samples):
moved, warp, _ = self.model(moving, fixed, registration=True, deterministic=deterministic)
all_moved[i] = moved[0]
all_warp[i] = warp[0]
# calculate the averages
avg_moved = torch.mean(all_moved, axis=0).unsqueeze(0)
avg_warp = torch.mean(all_warp, axis=0).unsqueeze(0)
# calculate the stds
moved_std = torch.mean(torch.std(all_moved, axis=0),axis=0)
warp_std = torch.mean(torch.std(all_warp, axis=0),axis=0)
return ([avg_moved, avg_warp, [], [], [], [], [], [], prediction_name],
[moved_std, [], warp_std, all_moved, [], [], all_warp, []])
##################################################################################################################
######## METRICS ########################################################
##################################################################################################################
def rmse(self, input,target):
""" Computes the root mean squared error between two images. """
criterion = torch.nn.MSELoss()
mse = criterion(input, target)
return torch.sqrt(mse)
def dsc(self, input,target):
""" Computes the dice similarity coefficient between two segmentation maps. """
input_size = input.size()[2:]
sumdims = list(range(2, len(input_size) + 2))
epsilon = 1e-6
dsc = ((2. * target*input).mean(dim=sumdims) + epsilon) / ((target**2).mean(dim=sumdims) + (input**2).mean(dim=sumdims) + epsilon)
return dsc.mean()
def jdet(self, df):
""" Computes the jacobian determinant of a displacement field. """
jdet = jacobian_det(df, normalize=True)
return jdet
def ncc(self,a,v, zero_norm=True):
"""Computes the normalized cross correaltion between two arrays
Args:
a (np.array): first array
v (np.array): second array
Returns:
float: the normalized cross correlation between arrays a and v
"""
a = a.flatten()
v = v.flatten()
eps = 1e-15
if zero_norm:
a = (a - np.mean(a)) / (np.std(a) * len(a) + eps)
v = (v - np.mean(v)) / (np.std(v) + eps)
else:
a = (a) / (np.std(a) * len(a) + eps)
v = (v) / (np.std(v) + eps)
return np.correlate(a, v)[0]
def lm_mae(self, lm1, lm2):
""" Computes the median manhattan distance (median absolute error) between two sets of landmarks
Args:
lm1 (torch.Tensor): first set of landmarks
Shape: (1, n_landmarks, 3)
lm2 (torch.Tensor): second set of landmarks
Shape: (1, n_landmarks, 3)
Returns:
float: the median manhattan distance between the landmarks
"""
distance = torch.abs(lm1-lm2).sum(dim=2)
return torch.median(distance)
def lm_euclid(self, lm1, lm2):
""" Computes the mean euclidean distance between two sets of landmarks
Args:
lm1 (torch.Tensor): first set of landmarks
Shape: (1, n_landmarks, 3)
lm2 (torch.Tensor): second set of landmarks
Shape: (1, n_landmarks, 3)
Returns:
float: the mean euclidean distance between the landmarks
"""
distance = torch.sqrt(((lm1-lm2)**2).sum(dim=2))
return torch.mean(distance)
def lms_var(self, lms):
""" Computes the variance of the euclidean distance between landmarks
Args:
lms (torch.Tensor): set of landmarks
Shape: (n_samples, n_landmarks, 3)
Returns:
torch.Tensor: the variance of the euclidean distance between landmarks
Shape: (n_landmarks,3)
"""
return torch.mean(torch.var(lms, dim=0),dim=-1)
def lms_corr(self, lm_hat, lms, lm):
""" Computes the normalized cross correlation between the mean squared error and the variance of the landmarks
Args:
lm_hat (torch.Tensor): predicted landmarks
Shape: (n_landmarks, 3)
lms (torch.Tensor): set of sample prediction landmarks
Shape: (n_samples, n_landmarks, 3)
lm (torch.Tensor): ground truth landmarks
Shape: (n_landmarks, 3)
Returns:
float: the normalized cross correlation between the mean squared error and the variance of the landmarks
"""
error = torch.mean((lm_hat - lm)**2, dim=-1).flatten()
variance = self.lms_var(lms).flatten()
error_normed = (error - torch.mean(error)) / (torch.std(error) * len(error))
variance_normed = (variance - torch.mean(variance)) / (torch.std(variance))
return np.correlate(error_normed.to("cpu"), variance_normed.to("cpu"))[0]
def warp_landmarks(self, lm: torch.Tensor, df:torch.Tensor) -> torch.Tensor:
""" Warps a set of landmarks using a displacement field
Args:
lm (torch.Tensor): set of landmarks
Shape: (1, n_landmarks, 3)
df (torch.Tensor): displacement field
Shape: (n_samples, ndims, H, W, D)
Returns:
torch.Tensor: the warped landmarks
Shape: (1, n_landmarks, 3)
"""
lm = lm.long()
new_lm = lm - df[:,:,lm[0,:,0],lm[0,:,1],lm[0,:,2]].transpose(-2,-1)
return new_lm
##################################################################################################################
######## Helpers for tables and visualizations ########################################################
##################################################################################################################
# adapted from https://stackoverflow.com/questions/24612626/b-spline-interpolation-with-python
    def bspline_interpolate(self, points):
        """ Interpolates a set of points using a b-spline. We use this to interpolate the
        line segments of the control point grid that visualizes the displacement fields.

        The point list is first padded: one extrapolated point on the left and
        two on the right (repeating the edge spacing and y-value), so the
        spline stays well-behaved near the segment ends. The knot/coefficient
        tuples returned by splrep are then overwritten with the padded raw
        coordinates before evaluation.

        Args:
            points (np.ndarray): control points of one grid line, shape (n, 2).

        Returns:
            np.ndarray: 1000 interpolated (x, y) points, shape (1000, 2).
        """
        points = points.tolist()
        degree = 3
        # edge spacing and y-values used for the extrapolated padding points
        dist_x_left = points[1][0]-points[0][0]
        y_left = points[0][1]
        dist_x_right = points[-1][0]-points[-2][0]
        y_right = points[-1][1]
        points = [[points[0][0] - dist_x_left,y_left]] + points + [[points[-1][0] + dist_x_right,y_right],[points[-1][0] + 2*dist_x_right,y_right]]
        points = np.array(points)
        n_points = len(points)
        x = points[:,0]
        y = points[:,1]
        # parametrize the curve by point index
        t = range(len(x))
        # evaluation parameters; trimmed by `degree` to stay inside the valid knot span
        ipl_t = np.linspace(1.0, len(points) - degree, 1000)
        x_tup = si.splrep(t, x, k=degree, per=1)
        y_tup = si.splrep(t, y, k=degree, per=1)
        # replace the fitted spline coefficients with the raw coordinates
        # (padded with zeros to the length splev expects), i.e. treat the
        # points themselves as the control polygon of the b-spline
        x_list = list(x_tup)
        xl = x.tolist()
        x_list[1] = [0.0] + xl + [0.0, 0.0, 0.0, 0.0]
        y_list = list(y_tup)
        yl = y.tolist()
        y_list[1] = [0.0] + yl + [0.0, 0.0, 0.0, 0.0]
        x_i = si.splev(ipl_t, x_list)
        y_i = si.splev(ipl_t, y_list)
        return np.stack((np.array(x_i), np.array(y_i)), axis=1)
def create_warped_grid(self, df, grid_size):
""" Creates a grid of control points that are warped by the displacement field.
This grid is used to visualize the displacement field.
Args:
df (torch.Tensor): displacement field
Shape: (1, ndims, H, W(, D))
control_points (int): number of control points
Returns:
grid_i, grid_j (torch.Tensor, torch.Tensor): The unstacked grid components
Shape: (control_points, control_points), (control_points, control_points)
"""
grid_i,grid_j = torch.meshgrid(torch.linspace(0,df.shape[-2]-1,grid_size),torch.linspace(0,df.shape[-1]-1,grid_size), indexing="ij")
grid = torch.stack((grid_i,grid_j))
grid = grid.type(torch.float32)
for i in range(grid.shape[-2]):
for j in range(grid.shape[-1]):
gi, gj = grid[:,i,j].int()
grid[:,i,j] -= df[0,:,gi,gj]
grid_i = grid[0,:,:]
grid_j = grid[1,:,:]
return grid_i, grid_j
def plot_grid(self,x,y, scatter=False, lines=True, straightlines=False, ax=None, **kwargs):
""" Plots a grid of control points that are warped by the displacement field.
This grid is used to visualize the displacement field.
Args:
x (torch.Tensor): The unstacked x-components of the grid
Shape: (control_points, control_points)
y (torch.Tensor): The unstacked y-components of the grid
Shape: (control_points, control_points)
Returns:
None
"""
ax = ax or plt.gca()
# horizontal line segments
segs1 = np.stack((y,x), axis=2)
# create bspline-interpolated horizontal lines
int_lines1 = np.zeros(shape=(segs1.shape[0],1000,2))
for i in range(segs1.shape[0]):
int_lines1[i] = self.bspline_interpolate(segs1[i])
# vertical lines
segs2 = segs1.transpose(1,0,2)
# create bspline-interpolated vertical lines
int_lines2 = np.zeros(shape=(segs1.shape[0],1000,2))
for i in range(segs2.shape[0]):
int_lines2[i] = self.bspline_interpolate(segs2[i])
# optionally scatter the control points
if scatter:
ax.scatter(y,x, **kwargs, s=6)
# plot the lines
if straightlines:
ax.add_collection(LineCollection(segs1, **kwargs))
ax.add_collection(LineCollection(segs2, **kwargs))
# plot the interpolated lines
if lines:
ax.add_collection(LineCollection(int_lines1, **kwargs))
ax.add_collection(LineCollection(int_lines2, **kwargs))
return
##################################################################################################################
######## Helpers for the tables ###############################################################
##################################################################################################################
def convert_to_scientific(self,value):
""" Converts a value to scientific notation if it is close to zero.
Args:
value (float): The value to convert
Returns:
str: The value in scientific notation
"""
if abs(value) < 0.001 and abs(value) > 0.0:
return format(value, '.2e')
else:
return value
# creates a table from a df and saves it as a tex and svg file
# if no name is given, the table is not saved
def make_tables(self, df, output_dir, show=False, name=None, fontsize=4):
""" Creates a table from a dataframe and saves it as a tex and svg file.
If no name is given, the table is not saved."""
df = df.applymap(self.convert_to_scientific)
# write latex table
latex_table = df.style.to_latex()
# export dataframe as svg file
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
table = ax.table(cellText=df.values, colLabels=df.columns, rowLabels=df.index, loc='center')
table.auto_set_font_size(False)
table.set_fontsize(fontsize)
table.auto_set_column_width(col=list(range(len(df.columns))))
fig.tight_layout()
if show == True:
print(latex_table)
plt.show()
if name != None:
with open(output_dir+"/"+name+".tex","w+") as f:
f.writelines(latex_table)
fig.savefig(output_dir+"/"+name+".svg")
return
def table_jdet(self, inputs, preds, output_dir=None, name="", show=True, save=False, fontsize=4):
""" Creates a table with the standard deviation of the jacobian determinant and
the percentage of pixels with jacobian determinant <=0 for the combined and individual deformation fields.
"""
y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
output_dir = output_dir if output_dir is not None else self.output_dir
data = np.zeros((self.latent_levels,4))
for key in final_dfs.keys():
final_dfs[key] = final_dfs[key].to("cpu")
individual_dfs[key] = individual_dfs[key].to("cpu")
for l in reversed(range(self.latent_levels)):
# combined DF
jdet = jacobian_det(final_dfs[l]).detach()
data[l,0] = jdet.std()
jdet_leq0 = np.where(jdet <= 0, 1, 0)
data[l,1] = (np.sum(jdet_leq0==1) / np.sum(np.ones_like(jdet_leq0)))*100
# individual DF
jdet = jacobian_det(individual_dfs[l]).detach()
data[l,2] = jdet.std()
jdet_leq0 = np.where(jdet <= 0, 1, 0)
data[l,3] = (np.sum(jdet_leq0==1) / np.sum(np.ones_like(jdet_leq0)))*100
supcol = np.repeat(["combined DF", "individual DF"],2)
subcol = np.tile(["JDet std", f"% of pixels <= 0"],2)
mux = pd.MultiIndex.from_arrays((supcol, subcol))
df = pd.DataFrame(data, columns=mux).round(3)
df.index.name = "Level"
if save:
self.make_tables(df, output_dir, show=show, name="jdet_"+name, fontsize=4)
else:
self.make_tables(df, output_dir, show=show, fontsize=4)
return
##################################################################################################################
######## Space for the visualizations #################################################################
##################################################################################################################
def artifact(self, image_tensor, method, x, y, z=None):
"""
Inserts an artificial artifact into a region of an image.
Args:
image_tensor (torch.Tensor): A PyTorch tensor representing the image.
method (str): The method to use for creating the artifact. Options are "blur", "noise", "mean", "white", "black", and "checkerboard".
x (tuple): The x-coordinates of the region to insert the artifact.
y (tuple): The y-coordinates of the region to insert the artifact.
z (tuple): The z-coordinates of the region to insert the artifact. Only required for 3D images.
Returns:
torch.Tensor: A new PyTorch tensor representing the augmented image.
"""
inshape = image_tensor.shape[2:]
if len(inshape) == 2 and z is not None:
raise ValueError("Ddist must be None for 2D images")
if len(inshape) == 3 and z is None:
raise ValueError("Ddist must be specified for 3D images")
# Select artifact region
if z is None:
roi = image_tensor[...,x[0]:x[1],y[0]:y[1]]
else:
roi = image_tensor[...,x[0]:x[1],y[0]:y[1],z[0]:z[1]]
# Apply the blur operation to the region
if method == "blur": # only works on 2D
roi = gaussian_blur(roi, kernel_size=int(5*2)+1, sigma=5)
elif method == "noise":
my_mean = roi.mean()
my_std = roi.std()
roi = torch.normal(my_mean, my_std, size=roi.shape)
elif method == "mean":
roi = roi.mean()
elif method == "white":
roi = 1
elif method == "black":
roi = 0
elif method == "checkerboard":
distx = x[1] - x[0]
disty = y[1] - y[0]
if z is not None:
distz = z[1] - z[0]
rx = 0
ry = 0
rz = 0
color = 1
roi[:] = color
while rx < distx/2:
if z is None:
roi[...,rx:-rx,ry:-ry] = color
else:
roi[...,rx:-rx,ry:-ry,rz:-rz] = color
rx += int(distx/10)
ry += int(disty/10)
if z is not None:
rz += int(distz/10)
color = 1-color
else:
raise ValueError("Method not recognized")
if z is None:
res = image_tensor.clone()
res[...,x[0]:x[1],y[0]:y[1]] = roi
else:
res = image_tensor.clone()
res[...,x[0]:x[1],y[0]:y[1],z[0]:z[1]] = roi
return res
# visualize one or several visualizations together in one plot
# visualizations is a list of functions
def visualize(self, inputs, preds, visualizations, all_preds=None, rowparams={}, title="", show=False, save_path=None):
""" Visualizes one or several visualization together in one plot.
Args:
inputs (list): The inputs as returned by sample_data()
preds (list): The predictions as returned by predict()
visualizations (list): Which visualization methods to plot
all_preds (list): The additional predictions as returned by predict()
rowparams (dict): An optional dictionary giving additional parameters to selected rows
title (str): The title of the plot
show (bool): Whether to show the plot
save_path (str): The path to save the plot
"""
# SLICE TO 2D
new_inputs = []
new_preds = []
new_all_preds = []
if len(inputs[0].shape[2:]) == 3:
for i in range(len(inputs)):
if isinstance(inputs[i], dict):
# skip it if it is empty
# important for when there's no segmentation
if not inputs[i][0].numel():
new_inputs.append(inputs[i])
else:
new_dict = {}
for key in inputs[i]:
slice_index = inputs[i][key].shape[-2] // 2
new_dict[key] = inputs[i][key][...,slice_index,:]
new_inputs.append(new_dict)
elif isinstance(inputs[i], str):
# for the dataset name
new_inputs.append(inputs[i])
else:
if not inputs[i].numel():
new_inputs.append(inputs[i])
else:
slice_index = inputs[i].shape[-2] // 2
new_inputs.append(inputs[i][...,slice_index,:])
for i in range(len(preds)):
if isinstance(preds[i], dict):
# skip it if the dict is empty
# important for when there's no segmentation
if not preds[i][0].numel():
new_preds.append(preds[i])
else:
new_dict = {}
for key in preds[i]:
slice_index = preds[i][key].shape[-2] // 2
new_dict[key] = preds[i][key][...,slice_index,:]
if new_dict[key].shape[-3] == 3:
new_dict[key] = torch.stack([new_dict[key][...,0,:,:], new_dict[key][...,2,:,:]],dim=-3)
new_preds.append(new_dict)
elif isinstance(preds[i], str):
# for the prediction name
new_preds.append(preds[i])
else:
# skip it if the dict is empty
# important for when there's no segmentation
if not preds[i].numel():
new_preds.append(preds[i])
else:
slice_index = preds[i].shape[-2] // 2
new_pred = preds[i][...,slice_index,:]
if new_pred.shape[-3] == 3:
new_pred = torch.stack([new_pred[...,0,:,:], new_pred[...,2,:,:]],dim=-3)
new_preds.append(new_pred)
if all_preds is not None:
for i in range(len(all_preds)):
if isinstance(all_preds[i], dict):
# skip it if the dict is empty
# important for when there's no segmentation
if not all_preds[i][0].numel():
new_all_preds.append(all_preds[i])
else:
new_dict = {}
for key in all_preds[i]:
slice_index = all_preds[i][key].shape[-2] // 2
new_dict[key] = all_preds[i][key][...,slice_index,:]
if len(new_dict[key].shape) > 2:
if new_dict[key].shape[-3] == 3:
new_dict[key] = torch.stack([new_dict[key][...,0,:,:], new_dict[key][...,2,:,:]],dim=-3)
new_all_preds.append(new_dict)
else:
# skip it if the dict is empty
# important for when there's no segmentation
if not all_preds[i].numel():
new_all_preds.append(all_preds[i])
else:
slice_index = all_preds[i].shape[-2] // 2
new_all_pred = all_preds[i][...,slice_index,:]
if len(new_all_pred.shape) > 2:
if new_all_pred.shape[-3] == 3:
new_all_pred = torch.stack([new_all_pred[...,0,:,:], new_all_pred[...,2,:,:]],dim=-3)
new_all_preds.append(new_all_pred)
inputs = new_inputs
preds = new_preds
all_preds = new_all_preds
# transfer to cpu for plotting
new_inputs = [{key: inputs[i][key].to("cpu") for key in inputs[i].keys()} if isinstance(inputs[i], dict) else inputs[i].to("cpu") for i in range(len(inputs)-1)]
new_inputs.append(inputs[-1])
new_preds = [{key: preds[i][key].to("cpu") for key in preds[i].keys()} if isinstance(preds[i], dict) else ([] if preds[i]==[] else preds[i].to("cpu")) for i in range(len(preds)-1)]
new_preds.append(preds[-1])
if all_preds != []:
new_all_preds = [{key: all_preds[i][key].to("cpu") for key in all_preds[i].keys()} if isinstance(all_preds[i], dict) else ([] if preds[i]==[] else preds[i].to("cpu")) for i in range(len(all_preds))]
inputs = new_inputs
preds = new_preds
all_preds = new_all_preds
# visualize
rows = len(visualizations)
cols = 4
fig, ax = plt.subplots(rows,cols)
fig.set_figwidth(30)
fig.set_figheight(30 * rows/self.latent_levels)
# cax = fig.add_axes([0.95, 0.05, 0.02, 0.95]) #this locates the axis that is used for a colorbar. It is scaled 0 - 1.
if title == None:
...
elif title != "":
fig.suptitle(title + f". {preds[-1]} on the {inputs[-1]} set.", fontsize=16)
else:
fig.suptitle(f" {preds[-1]} on the {inputs[-1]} set.", fontsize=16)
if len(visualizations) == 1: # necessary, so that calling ax[r,c] works even if there is only one row
ax = [ax]
for r in range(rows):
if visualizations[r] in [self.vis_output_var_per_level, self.vis_individual_df_var_per_level, self.vis_final_df_var_per_level, self.vis_sample_preds, self.vis_sample_segpreds, self.vis_sample_dfs]:
if r in rowparams.keys():
visualizations[r](ax[r], inputs, preds, all_preds, **rowparams[r])
else:
visualizations[r](ax[r], inputs, preds, all_preds)
elif r in rowparams.keys():
visualizations[r](ax[r], inputs, preds, **rowparams[r])
else:
visualizations[r](ax[r], inputs, preds)
for c in range(cols):
ax[r][c].set_xticks([], [])
ax[r][c].set_yticks([], [])
# use the following if you want to add a colorbar
# if r == 0:
# fig.colorbar(ax[0][3], cax, orientation = 'vertical') #'ax0' tells it which plot to base the colors on
if save_path is not None:
fig.savefig(save_path)
if show:
plt.show()
plt.close()
return
def vis_x_pred_y(self, ax, inputs, preds, vmin=0, vmax=1):
    """Plot one row: moving image, predicted fixed image, ground-truth fixed image, and the final DF as a flow image.

    Args:
        ax: row of at least 4 matplotlib axes.
        inputs: 9-tuple (x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader).
        preds: 9-tuple of model outputs; y_pred and final_dfs are used here.
        vmin, vmax: grayscale display range for the image columns.
    """
    x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader = inputs
    y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
    # columns 0-2: grayscale images (first sample, first channel), rotated for display
    gray_panels = (x[0, 0], y_pred[0, 0], y[0, 0])
    for col, img in enumerate(gray_panels):
        ax[col].imshow(np.rot90(img), "gray", vmin=vmin, vmax=vmax)
    # column 3: deformation field rendered as an RGB flow image
    ax[3].imshow(np.rot90(flow_to_image(final_dfs[0]).permute(0, 2, 3, 1)[0]))
    for col, label in enumerate(("Input", "Prediction", "Target", "DF")):
        ax[col].set_xlabel(label)
    ax[0].set_ylabel("input vs prediction")
    # direction colorbar for the flow image: hue wheel mapped to arrow glyphs
    cmap = get_cmap('hsv')
    cbar = plt.colorbar(cm.ScalarMappable(cmap=cmap), ax=ax[3])
    cbar.set_ticks([0.18, 0.51, 0.7, 1.0])
    cbar.set_ticklabels(["\u2190", "\u2193", "\u2192", "\u2191"])
    return
def vis_segx_segpred_segy(self, ax, inputs, preds):
    """Plot one row: moving, predicted and ground-truth segmentations (argmax over channels) plus the flow field.

    Args:
        ax: row of at least 4 matplotlib axes.
        inputs: 9-tuple (x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader).
        preds: 9-tuple of model outputs; y_pred_seg and df_pred are used here.
    """
    x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader = inputs
    y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
    # collapse one-hot channels of the first sample into label maps for display
    label_maps = (seg_x[0].argmax(0), y_pred_seg[0].argmax(0), seg_y[0].argmax(0))
    for col, label_map in enumerate(label_maps):
        ax[col].imshow(np.rot90(label_map))
    # column 3: deformation field rendered as an RGB flow image
    ax[3].imshow(np.rot90(flow_to_image(df_pred).permute(0, 2, 3, 1)[0]))
    for col, label in enumerate(("Input", "Prediction", "Target", "DF")):
        ax[col].set_xlabel(label)
    ax[0].set_ylabel("segmentation input vs prediction")
    # direction colorbar for the flow image: hue wheel mapped to arrow glyphs
    cmap = get_cmap('hsv')
    cbar = plt.colorbar(cm.ScalarMappable(cmap=cmap), ax=ax[3])
    cbar.set_ticks([0.18, 0.51, 0.7, 1.0])
    cbar.set_ticklabels(["\u2190", "\u2193", "\u2192", "\u2191"])
    return
def vis_pred_per_level(self, ax, inputs, preds, vmin=0, vmax=1):
    """Plot the image prediction produced by each output level of PULPo, highest level index in the leftmost column.

    Args:
        ax: row of at least 4 matplotlib axes.
        inputs: unused here apart from unpacking (kept for the common visualization signature).
        preds: 9-tuple of model outputs; outputs (per-level predictions) is used.
        vmin, vmax: grayscale display range.
    """
    x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader = inputs
    y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
    # enumerate(reversed(...)) maps level l to column self.latent_levels - l - 1
    for col, level in enumerate(reversed(range(self.latent_levels))):
        ax[col].imshow(np.rot90(outputs[level][0, 0]), "gray", vmin=vmin, vmax=vmax)
        ax[col].set_xlabel(f"Level {level}")
    ax[0].set_ylabel("Predictions per level")
    # blank out any unused columns (the figure always has 4)
    for col in range(self.latent_levels, 4):
        ax[col].axis('off')
    return
def vis_segpred_per_level(self, ax, inputs, preds):
    """Plot the warped segmentation produced by each output level of PULPo, highest level index in the leftmost column.

    Args:
        ax: row of at least 4 matplotlib axes.
        inputs: unused here apart from unpacking (kept for the common visualization signature).
        preds: 9-tuple of model outputs; warped_seg (per-level segmentations) is used.
    """
    x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader = inputs
    y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
    # enumerate(reversed(...)) maps level l to column self.latent_levels - l - 1
    for col, level in enumerate(reversed(range(self.latent_levels))):
        # collapse one-hot channels to a label map before plotting
        ax[col].imshow(np.rot90(warped_seg[level][0].argmax(0)))
        ax[col].set_xlabel(f"Level {level}")
    ax[0].set_ylabel("Predicted segmentation per level")
    # blank out any unused columns (the figure always has 4)
    for col in range(self.latent_levels, 4):
        ax[col].axis('off')
    return
# plot the difference of the input and the prediction of each output level
def vis_diff_input_pred(self, ax, inputs, preds, vmin=-1, vmax=1):
    """Plot, per output level, the difference between that level's prediction and the resized moving image.

    Args:
        ax: row of at least 4 matplotlib axes.
        inputs: 9-tuple; only x (the moving image) is used.
        preds: 9-tuple of model outputs; outputs (per-level predictions) is used.
        vmin, vmax: grayscale display range for the signed difference.
    """
    x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader = inputs
    y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
    for col, level in enumerate(reversed(range(self.latent_levels))):
        # downsample/upsample x to this level's spatial size so the subtraction is valid
        x_resized = torch.nn.functional.interpolate(x, size=outputs[level][0, 0].shape, mode="bilinear", align_corners=False)
        ax[col].imshow(np.rot90(outputs[level][0, 0] - x_resized[0, 0]), "gray", vmin=vmin, vmax=vmax)
        ax[col].set_xlabel(f"Level {level}")
    ax[0].set_ylabel("Difference Input / Predictions per level")
    # blank out any unused columns (the figure always has 4)
    for col in range(self.latent_levels, 4):
        ax[col].axis('off')
    return
def vis_diff_target_pred(self, ax, inputs, preds, vmin=-1, vmax=1):
    """Plot, per output level, the difference between that level's prediction and the resized fixed/target image.

    Args:
        ax: row of at least 4 matplotlib axes.
        inputs: 9-tuple; only y (the fixed image) is used.
        preds: 9-tuple of model outputs; outputs (per-level predictions) is used.
        vmin, vmax: grayscale display range for the signed difference.
    """
    x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader = inputs
    y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
    for col, level in enumerate(reversed(range(self.latent_levels))):
        # downsample/upsample y to this level's spatial size so the subtraction is valid
        y_resized = torch.nn.functional.interpolate(y, size=outputs[level][0, 0].shape, mode="bilinear", align_corners=False)
        ax[col].imshow(np.rot90(outputs[level][0, 0] - y_resized[0, 0]), "gray", vmin=vmin, vmax=vmax)
        ax[col].set_xlabel(f"Level {level}")
    ax[0].set_ylabel("Difference Target / Predictions per level")
    # blank out any unused columns (the figure always has 4)
    for col in range(self.latent_levels, 4):
        ax[col].axis('off')
    return
def vis_jdet(self, ax, inputs, preds):
    """Visualize the Jacobian determinant of the final deformation field at each level of PULPo.

    One seaborn heatmap per level (diverging palette centered at 0, display
    range clipped to [-2, 4]); only the last used column keeps its colorbar.

    Args:
        ax: row of at least 4 matplotlib axes.
        inputs: unused here apart from unpacking (kept for the common visualization signature).
        preds: 9-tuple of model outputs; final_dfs (per-level deformation fields) is used.
    """
    x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader = inputs
    y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
    for l in reversed(range(self.latent_levels)):
        heatplot = sns.heatmap(np.rot90(jacobian_det(final_dfs[l])[0].detach()),
                               ax=ax[self.latent_levels-l-1],
                               cmap=sns.diverging_palette(10, 250, sep=1, n=100),
                               center=0., vmin=-2, vmax=4)
        # keep the colorbar only for level 0, which sits in the last used column
        if l != 0:
            heatplot.collections[0].colorbar.remove()
        ax[self.latent_levels-l-1].set_xlabel(f"Level {l}")
    # use to set colorbar for the last column
    # colorbar = plt.colorbar(heatplot.collections[0], ax=ax[4])
    # fixed label: the plotted quantity is the Jacobian determinant itself, not its std
    ax[0].set_ylabel("heatmap of JDet")
    if self.latent_levels < 4:
        for l in range(self.latent_levels, 4):
            ax[l].axis('off')
    return
def vis_final_df_per_level(self, ax, inputs, preds, flow=True, grid=True):
    """Visualize the final deformation field of each level of PULPo.

    Args:
        ax: row of at least 4 matplotlib axes.
        inputs: unused here apart from unpacking (kept for the common visualization signature).
        preds: 9-tuple of model outputs; final_dfs (per-level deformation fields) is used.
        flow: if True, render each DF as an RGB flow image.
        grid: if True, overlay the deformed grid lines.
    """
    x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader = inputs
    y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
    for l in reversed(range(self.latent_levels)):
        col = self.latent_levels - l - 1  # level 0 ends up in the last used column
        if flow:  # idiom fix: truth-test the flag instead of comparing "== True"
            ax[col].imshow(np.rot90(flow_to_image(final_dfs[l])[0].permute(1, 2, 0)))
        if grid:
            # rotate the DF the same way as the flow image so the grid overlay lines up
            grid_i, grid_j = self.create_warped_grid(np.rot90(final_dfs[l], axes=(-2, -1)), self.grid_size)
            self.plot_grid(grid_i, grid_j, ax=ax[col], scatter=False, lines=True, straightlines=False, color="black", linewidth=0.5)
        ax[col].set_xlabel(f"Level {l}")
    ax[0].set_ylabel("Final DF per level.")
    # blank out any unused columns (the figure always has 4); range is empty when latent_levels >= 4
    for col in range(self.latent_levels, 4):
        ax[col].axis('off')
    # use to also plot a colorbar
    # cmap = get_cmap('hsv')
    # cbar = plt.colorbar(cm.ScalarMappable(cmap=cmap), ax=ax[3])
    # cbar.set_ticks([0.18,0.51,0.7,1.0])
    # cbar.set_ticklabels(["\u2190","\u2193","\u2192", "\u2191"])
    return
def vis_combined_df_per_level(self, ax, inputs, preds, flow=True, grid=True):
    """Visualize the combined deformation field of each level of PULPo.

    Args:
        ax: row of at least 4 matplotlib axes.
        inputs: unused here apart from unpacking (kept for the common visualization signature).
        preds: 9-tuple of model outputs; combined_dfs (per-level deformation fields) is used.
        flow: if True, render each DF as an RGB flow image.
        grid: if True, overlay the deformed grid lines.
    """
    x, y, seg_x, seg_y, lm_x, lm_y, mask_x, mask_y, loader = inputs
    y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
    for l in reversed(range(self.latent_levels)):
        col = self.latent_levels - l - 1  # level 0 ends up in the last used column
        if flow:  # idiom fix: truth-test the flag instead of comparing "== True"
            ax[col].imshow(np.rot90(flow_to_image(combined_dfs[l])[0].permute(1, 2, 0)))
        if grid:
            # rotate the DF the same way as the flow image so the grid overlay lines up
            grid_i, grid_j = self.create_warped_grid(np.rot90(combined_dfs[l], axes=(-2, -1)), self.grid_size)
            self.plot_grid(grid_i, grid_j, ax=ax[col], scatter=False, lines=True, straightlines=False, color="black", linewidth=0.5)
        ax[col].set_xlabel(f"Level {l}")
    ax[0].set_ylabel("Combined DF per level.")
    # blank out any unused columns (the figure always has 4); range is empty when latent_levels >= 4
    for col in range(self.latent_levels, 4):
        ax[col].axis('off')
    # use to also plot a colorbar
    # cmap = get_cmap('hsv')
    # cbar = plt.colorbar(cm.ScalarMappable(cmap=cmap), ax=ax[3])
    # cbar.set_ticks([0.18,0.51,0.7,1.0])
    # cbar.set_ticklabels(["\u2190","\u2193","\u2192", "\u2191"])
    return
def vis_individual_df_per_level(self, ax, inputs, preds, flow=True, grid=True):
""" Visualizes the individual deformation field of each level of PULPo."""
x,y,seg_x,seg_y,lm_x,lm_y,mask_x,mask_y,loader = inputs
y_pred, df_pred, y_pred_seg, outputs, individual_dfs, combined_dfs, final_dfs, warped_seg, prediction_name = preds
for l in reversed(range(self.latent_levels)):
if flow==True:
ax[self.latent_levels-l-1].imshow(np.rot90(flow_to_image(individual_dfs[l])[0].permute(1,2,0)))
if grid == True:
grid_i, grid_j = self.create_warped_grid(np.rot90(individual_dfs[l],axes=(-2,-1)), self.grid_size)
self.plot_grid(grid_i,grid_j, ax=ax[self.latent_levels-l-1], scatter=False, lines=True, straightlines=False, color="black", linewidth=0.5)
ax[self.latent_levels-l-1].set_xlabel(f"Level {l}")