# moreutils.py
import numpy as np
import os
import torch
# For training on a cluster, uncomment these 2 lines:
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from skimage.metrics import structural_similarity as ssim
from copy import deepcopy
from utils import util
from scipy.ndimage import gaussian_filter, label, binary_dilation
from matplotlib.pyplot import boxplot
import statsmodels.api as sm
import nibabel as nib
import csv
import scipy
import warnings
from collections import Counter
from PIL import Image
import torchvision
import monai
def saveUncert(img, save_path):
    img = img.detach().cpu()  # Detach and move to CPU; gradients are no longer needed
    max_img = img.max()
    min_img = img.min()
    img = (img - min_img) / (max_img - min_img)  # Min-max normalise to [0, 1]
    img = img.unsqueeze(0)
    torchvision.utils.save_image(img, save_path, normalize=True)  # Just in case!
def saveFigs(imgs, fig_path, create_dir=False, nlabels=1,
             sequences=[], suptitle=""):
    '''
    Plots a series of images next to one another, one subplot row per batch element.
    :param imgs: Dictionary containing images
    :param fig_path: Path + name under which to save the file
    :param create_dir: Whether the directory in which to save the images must be created.
    :param nlabels: Number of labels in the label image.
    :param sequences: Optional list of sequence names (one per batch element) appended to each title.
    :param suptitle: Figure-level title.
    :return:
    '''
    # If the directory doesn't exist, we create it.
    if create_dir:
        os.makedirs(os.path.dirname(fig_path), exist_ok=True)
    n_rows = imgs[list(imgs.keys())[0]].shape[0]  # Batch size
    # Number of subplots
    n_subplots = n_rows
    # Before plotting the figure, we make post-processed duplicates of the images.
    maxes = -10000
    mins = 10000
    imgs_to_plot = {}
    for key, value in imgs.items():
        if 'label' in key:
            temp_i = util.tensor2label(value, nlabels)
            if len(temp_i.shape) == 5:
                temp_i, _ = chopSlice(temp_i[..., 0], cut='a', mode='middle')
                imgs_to_plot[key] = temp_i
            else:
                imgs_to_plot[key] = temp_i[..., 0]
        else:
            if len(value.shape) == 5:
                temp_i, _ = chopSlice(util.tensor2im(value)[..., 0], cut='a', mode='middle')
                temp_i = np.stack([temp_i] * 3, -1)
            else:
                temp_i = util.tensor2im(value, is2d=True)
            imgs_to_plot[key] = temp_i
        mins_ = min([i.min() for i in imgs_to_plot[key]])
        maxs_ = max([i.max() for i in imgs_to_plot[key]])
        if mins_ < mins:
            mins = mins_
        if maxs_ > maxes:
            maxes = maxs_
    # Plot figures.
    f = plt.figure(figsize=(7, 2 * n_rows))
    subplot_counter = 1
    for b in range(n_rows):
        title_ = ", ".join(imgs_to_plot.keys())
        if len(sequences) != 0:
            title_ = "%s (%s)" % (title_, sequences[b])
        to_plot_array = []
        for item, value in imgs_to_plot.items():
            if 'label' in item:
                to_plot_array.append(get_rgb_colours()[value])
            else:
                to_plot_array.append(value)
        # Make sure the images can be concatenated: pad them all to the largest spatial size.
        lista_maxes = np.asarray([i.shape[1:] for i in to_plot_array]).max(0)
        padder = monai.transforms.SpatialPad(lista_maxes, mode="constant", value=mins)
        for ind, itemim in enumerate(to_plot_array):
            to_plot_array[ind] = padder(np.expand_dims(itemim[b, ...], 0))[0, ...]
        plt.subplot(n_rows, 1, subplot_counter)
        plt.imshow(np.concatenate(to_plot_array, 1), vmin=mins, vmax=maxes)
        plt.axis('off')
        plt.title(title_)
        subplot_counter += 1
    plt.suptitle(suptitle)
    plt.tight_layout()
    plt.savefig(fig_path)
    plt.close(f)
def saveFigs_stacks(imgs, fig_path, create_dir=False, nlabels=1, index=-1, same_scale=False, titles=[],
                    batch_accollades={}, index_label=-1, bound_normalization=False):
    '''
    Plots a series of images next to one another, for one element of the batch or for the whole batch.
    Plots images (not labels) stacked together.
    :param imgs: Tuple or list of N Bx1/3xHxW tensors or numpy images.
    :param fig_path: Path + name under which to save the file
    :param titles: Names to give to each of the N different types of images provided. Must be a list
    of the same size as N (not B!). The titles will be the same for each batch element.
    :param create_dir: Whether the directory in which to save the images must be created.
    :param nlabels: Number of labels in the label image.
    :param index: If -1, every batch element is plotted in its own subplot row. Otherwise, index
    is the position (0 to B-1) of the single batch element that will be plotted.
    :param same_scale: Whether to plot all (N-1) images within a row/batch, excluding the label,
    on the same min-max scale.
    :param batch_accollades: dictionary mapping a placeholder in the titles to a string (when index
    is not -1) or to a list of size B, holding the values that change from one batch element to
    another. Example: key: sequence, value = [T1, FLAIR, T1, FLAIR], titles = [Input (sequence), ...].
    For each item of the batch, the title would be Input (T1), Input (FLAIR), etc.
    :param index_label: index within N (from 0 to N-1) at which the label/segmentation map sits.
    :param bound_normalization: boolean; if True, images are assumed to be bounded between -1 and 1.
    :return:
    '''
    if create_dir:
        os.makedirs(os.path.dirname(fig_path), exist_ok=True)
    if index == -1:
        subplots = True
    else:
        subplots = False
    n_columns = len(imgs)
    for ind_img, img in enumerate(imgs):
        if len(img.shape) < 4:
            # There is no batch dimension (single image): plot a single row.
            if torch.is_tensor(img):
                imgs[ind_img] = img.unsqueeze(0)
            subplots = False
            index = 0
    if subplots:
        # The batch size is the first dimension.
        n_rows = imgs[0].shape[0]  # N_Batches
        batch_size = imgs[0].shape[0]
        relevant_indices = list(range(0, batch_size))
    else:
        n_rows = 1
        batch_size = imgs[0].shape[0]
        relevant_indices = [index]
    n_subplots = 1 * n_rows
    if titles == []:
        titles = ["Image"] * n_columns
    # batch_accollades maps a placeholder in the title to a list containing, for each
    # batch element, the value with which the placeholder will be replaced.
    for key, value in batch_accollades.items():
        for title in titles:
            if key in title:
                if subplots:
                    if len(value) != n_rows:
                        raise ValueError("The number of batch accollades for %s must match the batch "
                                         "size. Expected %d but found %d." % (key, n_rows, len(value)))
    f = plt.figure(figsize=(7 * n_columns, 5 * n_rows))
    subplot_counter = 1
    for b in range(batch_size):
        if b not in relevant_indices:
            continue
        # Initialisation
        b_imgs = []
        maxes = -10000
        mins = 10000
        # Titles
        titles_ = []
        for title in titles:
            for key, value in batch_accollades.items():
                if key in title:
                    if subplots:
                        titles_.append(title.replace(key, value[b]))
                    else:
                        titles_.append(title.replace(key, value))
                else:
                    titles_.append(title)
        if titles_ == []:
            titles_ = titles
        for ind_i, i in enumerate(imgs):
            if ind_i == index_label:
                if torch.is_tensor(i[b:b + 1, ...]):
                    i_ = util.tensor2label(i[b:b + 1, ...], nlabels)
                else:
                    i_ = i
                i_ = handleSizes(i_, rgb_ok=True)
                b_imgs.append(i_)
            else:
                if torch.is_tensor(i[b:b + 1, ...]):
                    i_ = util.tensor2im(i[b:b + 1, ...], bound_normalization=bound_normalization)
                else:
                    i_ = i
                i_ = handleSizes(i_)
                if i_.min() < mins:
                    mins = i_.min()
                if i_.max() > maxes:
                    maxes = i_.max()
                b_imgs.append(np.stack([i_] * 3, -1))
        b_imgs = np.concatenate(b_imgs, 1)
        plt.subplot(n_rows, 1, subplot_counter)
        plt.title(", ".join(titles_), fontsize=18)
        plt.imshow(b_imgs)
        plt.axis('off')
        subplot_counter += 1
    plt.savefig(fig_path)
    plt.close(f)
def saveFigs_1row(real, synt, fig_path, create_dir=False, sequence="", divide=1,
                  bound_normalization=False):
    '''
    Plots the real image followed by the synthesised images, concatenated in rows.
    :param real: tensor, real image (plotted first)
    :param synt: tensor, synthetic images (N x C x H x W)
    :param fig_path: path under which to save the figure
    :param create_dir: whether to create the directory if it doesn't exist
    :param sequence: which style/sequence it is (used in the title)
    :param divide: number of rows over which the N+1 images are spread
    :param bound_normalization: if True, images are assumed to be bounded between -1 and 1.
    :return:
    '''
    # Create directory if needed
    if create_dir:
        os.makedirs(os.path.dirname(fig_path), exist_ok=True)
    if torch.is_tensor(real):
        real = util.tensor2im(real, normalize=False, bound_normalization=bound_normalization)
    # The synthetic images are N x C x H x W; we want to tile them as H x (N x W).
    column_index = 1  # Number of images we've got on the current row
    row_index = 1
    total_per_row = int((synt.shape[0] + 1) / divide)
    first_row = real
    next_row = None
    OUT = None
    for sample_id in range(synt.shape[0]):
        sample = synt[sample_id, ...]
        if torch.is_tensor(sample):
            sample[sample < 0] = 0.0
            sample = util.tensor2im(sample, normalize=False, bound_normalization=bound_normalization)
        sample = handleSizes(sample)
        column_index += 1
        if column_index > total_per_row:
            # We are exceeding the number of images per row: start a new row.
            if row_index == 1:
                OUT = first_row
            else:
                OUT = np.concatenate((OUT, next_row), axis=0)  # On the height axis.
            row_index += 1
            column_index = 1
            next_row = sample
        else:
            # No need to change rows; we chain up columns.
            if row_index == 1:
                first_row = np.concatenate((first_row, sample), axis=1)  # On the width axis.
            else:
                next_row = np.concatenate((next_row, sample), axis=1)  # On the width axis.
    if row_index == 1:
        OUT = first_row  # Single-row layout: nothing was ever stacked vertically.
    elif next_row is not None:
        try:
            OUT = np.concatenate((OUT, next_row), axis=0)
        except ValueError:
            warnings.warn("Omitted the last row of the plot because the 'divide' arg is not a divisor "
                          "of the number of generated images + 1. Consider modifying the parameter.")
    # Display and save
    plt.figure(figsize=(10, 10))
    plt.imshow(OUT, cmap='gist_gray')
    plt.title("Input style (%s). First: input." % sequence)
    plt.colorbar(fraction=0.03)
    plt.savefig(fig_path)
    plt.close()
def plotHeatmaps(original, tweaked, codes, save_name, bound_normalization=False):
    n_codes = tweaked.shape[0]
    n_subplots = int(np.ceil(np.sqrt(n_codes + 1)))
    original = util.tensor2im(original, normalize=True, bound_normalization=bound_normalization)
    f = plt.figure(figsize=(n_subplots * 6, n_subplots * 4))
    plt.subplot(n_subplots, n_subplots, 1)
    plt.imshow(original)
    subplot_index = 2
    for c in range(n_codes):
        plt.subplot(n_subplots, n_subplots, subplot_index)
        code_im = util.tensor2im(tweaked[c, ...], bound_normalization=bound_normalization)
        difference = (code_im - original) / original
        difference[np.isnan(difference)] = 0.0
        difference[np.isinf(difference)] = 0.0  # Guard against division by zero as well as 0/0
        difference = np.round(difference * 255).astype('uint8')[:, :, 1]
        plt.imshow(code_im)
        plt.imshow(difference, alpha=0.4, cmap=plt.get_cmap('jet'))
        plt.colorbar(fraction=0.035)
        plt.title("Code %d" % (codes[c]))
        subplot_index += 1
    plt.savefig(save_name)
    plt.close(f)
def plotMus(original, mu, save_name, bound_normalization=False):
    original = util.tensor2im(original, normalize=True, bound_normalization=bound_normalization)
    mu = util.tensor2im(mu, normalize=True, bound_normalization=bound_normalization)
    f = plt.figure(figsize=(3 * 6, 4))
    plt.subplot(1, 3, 1)
    plt.imshow(original)
    plt.title("Original")
    plt.subplot(1, 3, 2)
    plt.imshow(mu)
    plt.title("Mean")
    plt.subplot(1, 3, 3)
    difference = ((mu - original) / original)
    difference[np.isnan(difference)] = 0.0
    difference[np.isinf(difference)] = 0.0  # Guard against division by zero as well as 0/0
    difference = np.round(difference * 255).astype('uint8')[:, :, 1]
    plt.imshow(difference, alpha=0.4, cmap=plt.get_cmap('jet'))
    plt.colorbar(fraction=0.035)
    plt.title("Difference")
    plt.savefig(save_name)
    plt.close(f)
def handleSizes(img, rgb_ok=False):
    if isinstance(img, np.ndarray):
        i = deepcopy(img)
        if i.ndim > 3:
            i = i[0, :, :, :]
        # The channel dim can be in position 0 or 2
        if i.shape[0] <= 3:
            channels = i.shape[0]
            if channels != 3:
                i = i[0, :, :]
            elif channels == 3:
                if not rgb_ok:
                    i = i[1, :, :]  # We keep the central channel
                else:
                    i = np.transpose(i, [1, 2, 0])
        elif i.shape[2] <= 3:
            channels = i.shape[2]
            if channels != 3:
                i = i[:, :, 0]
            if channels == 3:
                if not rgb_ok:
                    i = i[:, :, 1]  # We keep the central channel
                else:
                    pass
    elif torch.is_tensor(img):
        i = img.clone()
        if i.dim() > 3:
            if i.shape[0] > 1:
                i = i[0, -1]
            else:
                i = i.squeeze(0)
        # The channel dim can be in position 0 or 2
        if i.shape[0] <= 3:
            channels = i.shape[0]
            if channels != 3:
                i = i[0, :, :]
            elif channels == 3:
                if not rgb_ok:
                    i = i[1, :, :]  # We keep the central channel
                else:
                    i = i.permute(1, 2, 0)
        elif i.shape[2] <= 3:
            channels = i.shape[2]
            if channels != 3:
                i = i[:, :, 0]
            if channels == 3:
                if not rgb_ok:
                    i = i[:, :, 1]  # We keep the central channel
                else:
                    pass
    else:
        raise TypeError("Unknown type.")
    return i
def pair_up(list_labs):
    np.random.shuffle(list_labs)
    if len(list_labs) % 2 != 0:
        # Odd number of elements: leave the last one unpaired and pair up the two halves.
        cleeve = int((len(list_labs) - 1) / 2)
        pairs = list(zip(list_labs[:cleeve], list_labs[cleeve:-1]))
    else:
        cleeve = int(len(list_labs) / 2)
        pairs = list(zip(list_labs[:cleeve], list_labs[cleeve:]))
    return pairs
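# Illustrative sketch (not part of the original pipeline): pair_up shuffles the
# list in place and pairs its first half with its second half; with an odd count,
# one element is left unpaired. The filenames below are hypothetical.
def _demo_pair_up():
    labs = ["lab_%02d.nii.gz" % i for i in range(6)]
    return pair_up(labs)  # Three (first-half, second-half) pairs from the shuffled list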
def structural_Similarity(img1, img2, mean=False):
    '''
    Calculates the Structural Similarity Index (SSIM) between two tensors.
    :param img1: tensor 1 (B x C x H x W or B x H x W x C)
    :param img2: tensor 2 (same layout as img1)
    :return: list of SSIM indices (between 0 and 1), one per batch element
    '''
    out = []
    img1_c = img1.clone()
    img2_c = img2.clone()
    if img1.dim() != img2.dim():
        raise ValueError("Dimensions must match!")
    for b in range(img1_c.shape[0]):
        sub_img1_c = img1_c[b, ...]
        sub_img2_c = img2_c[b, ...]
        size1 = sub_img1_c.shape
        size2 = sub_img2_c.shape
        # Shrink the channel dimension down to a single 2D slice.
        if size1[0] == 1:
            sub_img1_c = sub_img1_c.squeeze(0)
        elif size1[0] == 3:
            sub_img1_c = sub_img1_c[1, :, :]  # We keep the central channel
        elif size1[2] == 1:
            sub_img1_c = sub_img1_c.squeeze(2)
        elif size1[2] == 3:
            sub_img1_c = sub_img1_c[:, :, 1]  # We keep the central channel
        else:
            raise ValueError("Unknown dimension for im1. Either the 0th or 2nd dims must be 1 or 3.")
        if size2[0] == 1:
            sub_img2_c = sub_img2_c.squeeze(0)
        elif size2[0] == 3:
            sub_img2_c = sub_img2_c[1, :, :]  # We keep the central channel
        elif size2[2] == 1:
            sub_img2_c = sub_img2_c.squeeze(2)
        elif size2[2] == 3:
            sub_img2_c = sub_img2_c[:, :, 1]  # We keep the central channel
        else:
            raise ValueError("Unknown dimension for im2. Either the 0th or 2nd dims must be 1 or 3.")
        if torch.is_tensor(sub_img1_c):
            sub_img1_c = sub_img1_c.detach().cpu().numpy()
        if torch.is_tensor(sub_img2_c):
            sub_img2_c = sub_img2_c.detach().cpu().numpy()
        out.append(ssim(sub_img1_c, sub_img2_c))
    if mean:
        return np.mean(out)
    return out
def l2(im1, im2):
    '''
    Returns the L2 norm between two tensors, per batch element.
    :param im1: Image 1 (tensor, B x C x H x W)
    :param im2: Image 2 (tensor, B x C x H x W)
    :return: list of per-element L2 norms (floats)
    '''
    if im1.shape[0] > 1:
        out = []
        for b in range(im1.shape[0]):
            out.append(float(torch.sqrt((im1[b, ...] - im2[b, ...]).pow(2).sum())))
        return out
    else:
        return [float(torch.sqrt((im1 - im2).pow(2).sum()))]
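# Illustrative sketch (hypothetical tensors, assumed B x C x H x W, not part of the
# original pipeline): l2 returns one Euclidean distance per batch element, so a
# perfect reconstruction yields zeros.
def _demo_l2():
    real = torch.rand(2, 1, 64, 64)
    fake = real + 0.05 * torch.rand(2, 1, 64, 64)
    return l2(real, fake)  # Two floats, one per batch element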
def normalizedMutualInfo(im1, im2, bins=125):
    """
    Computes the Normalized Mutual Information between two images.
    :param im1: PyTorch tensor image
    :param im2: PyTorch tensor image
    :return: NMI
    """
    im1_bis = im1.clone()
    im2_bis = im2.clone()
    # We go from tensor to numpy
    im1_bis = im1_bis.detach().cpu().numpy()
    im2_bis = im2_bis.detach().cpu().numpy()
    # We compute the 2D histogram
    joint_his, _, _ = np.histogram2d(im1_bis.ravel(), im2_bis.ravel(), bins)
    # NMI: we need to fetch the probabilities first
    joint_prob = joint_his / np.sum(joint_his)  # Normalize the histogram
    prob1 = np.sum(joint_prob, axis=1)
    prob2 = np.sum(joint_prob, axis=0)
    prob1_prob2 = prob1[:, None] * prob2[None, :]
    joint_prob_NZ = joint_prob > 0
    # Mutual Information
    MI = np.sum(joint_prob[joint_prob_NZ] * np.log(joint_prob[joint_prob_NZ] / prob1_prob2[joint_prob_NZ]))
    # Entropies
    prob1_NZ = prob1 > 0
    entropy1 = -np.sum(prob1[prob1_NZ] * np.log(prob1[prob1_NZ]))
    prob2_NZ = prob2 > 0
    entropy2 = -np.sum(prob2[prob2_NZ] * np.log(prob2[prob2_NZ]))
    return (2 * MI / (entropy1 + entropy2))
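# The quantity returned above is the symmetric form NMI = 2 * MI / (H1 + H2): it is
# 1.0 when the images are identical and approaches 0 for independent ones. A minimal
# sketch with hypothetical inputs (not part of the original pipeline):
def _demo_nmi():
    a = torch.rand(1, 1, 64, 64)
    identical = normalizedMutualInfo(a, a)  # ~1.0: identical images share all information
    independent = normalizedMutualInfo(a, torch.rand(1, 1, 64, 64))  # close to 0
    return identical, independent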
def plotTrainingLosses(loss_file, *args):
    if len(args) > 0:
        savedir = args[0]
    else:
        savedir = loss_file.split('/')
        savedir = "/".join(savedir[:-1])
    losses = {}  # All iterations in one epoch
    losses_av = {}  # Per epoch
    epoch_vector = []
    iter_vector = []
    modalities = {"T1": 'darkblue', "FLAIR": 'teal'}
    better_names = {'KLD': "KLD Loss", "GAN": "Total GAN Loss", "VGG": "VGG Loss",
                    "GAN_Feat": "Feature Loss (GAN)", "D_Fake": "Discriminator (on fake)",
                    "D_real": "Discriminator (on real)", "coherence": "NMI inter-sequence loss"}
    with open(loss_file, 'r') as loss_f:
        lines = loss_f.readlines()
    last_epoch = 0
    for ind_line, line in enumerate(lines):
        if '(epoch' in line:
            splitline = line.split('(')
            splitline = splitline[-1]
            splitline = splitline.split(')')
            epoch, iter = process_Epoch_Stream(splitline[0])
            iter_vector.append(epoch * iter)
            if epoch != last_epoch:
                last_epoch = epoch
                epoch_vector.append(epoch)
                # With each change of epoch, we dump the accumulated losses and empty them.
                if epoch != 1:  # Because we have no data for "epoch 0"
                    for loss_type, loss_val_dict in losses.items():
                        for loss_mod, loss_vector in loss_val_dict.items():
                            if loss_type not in losses_av.keys():
                                losses_av[loss_type] = {}
                                losses_av[loss_type][loss_mod] = [np.mean(loss_vector)]
                            else:
                                if loss_mod not in losses_av[loss_type].keys():
                                    losses_av[loss_type][loss_mod] = [np.mean(loss_vector)]
                                else:
                                    losses_av[loss_type][loss_mod].append(np.mean(loss_vector))
                    losses = {}
            splitline = splitline[-1]  # We keep the losses
            losses_line = splitline.split(";")
            losses_line = losses_line[:-1]  # The last item is a linebreak
            for loss in losses_line:
                loss_sp = loss.split(':')
                loss_id = loss_sp[0]  # Loss ID
                loss_id = loss_id.replace(" ", "")
                try:
                    # We use the nicer name when it's in the dictionary
                    # (a KeyError sends us to the fallback below).
                    if better_names[loss_id] not in losses.keys():
                        losses[better_names[loss_id]] = {}
                    loss_id = better_names[loss_id]
                except KeyError:
                    if loss_id not in losses.keys():
                        losses[loss_id] = {}  # Otherwise we keep the raw name
                loss_sp = loss_sp[-1]
                loss_sp = loss_sp.split(" ")  # Split on spaces
                if loss_sp[0] == "":
                    del loss_sp[0]
                ranges = np.arange(0, len(loss_sp), 2)
                for r in ranges:
                    # The rth element is the modality, the (r+1)th its loss value.
                    if loss_sp[r] not in losses[loss_id].keys():
                        losses[loss_id][loss_sp[r]] = [np.round(float(loss_sp[r + 1]), 3)]
                    else:
                        losses[loss_id][loss_sp[r]].append(np.round(float(loss_sp[r + 1]), 3))
    # Last epoch processing
    for loss_type, loss_val_dict in losses.items():
        for loss_mod, loss_vector in loss_val_dict.items():
            if loss_type not in losses_av.keys():
                losses_av[loss_type] = {}
                losses_av[loss_type][loss_mod] = [np.mean(loss_vector)]
            else:
                if loss_mod not in losses_av[loss_type].keys():
                    losses_av[loss_type][loss_mod] = [np.mean(loss_vector)]
                else:
                    losses_av[loss_type][loss_mod].append(np.mean(loss_vector))
    losses = {}
    # Both discriminator terms go in the same plot!
    f = plt.figure(figsize=(15, 15))
    # Number of losses
    n_losses = len(losses_av.keys()) - 1  # The discriminator losses go together
    # Number of subplots
    n_plots = int(np.ceil(np.sqrt(n_losses)))
    plot_ID = 1
    for key, lossval in losses_av.items():
        if 'Discriminator' in key:
            continue  # Plotted jointly below
        plt.subplot(n_plots, n_plots, plot_ID)
        this_legend = []
        for mod, mod_color in modalities.items():
            plt.plot(epoch_vector, lossval[mod], ':', color=mod_color)
            this_legend.append(mod)
        plt.legend(this_legend)
        plt.title(key)
        plt.xlabel("Iteration")
        plt.ylabel("Loss value")
        plot_ID = plot_ID + 1
    # We plot the discriminator losses together.
    plt.subplot(n_plots, n_plots, plot_ID)
    this_legend = []
    for mod, mod_color in modalities.items():
        sum_losses = [i + losses_av['Discriminator (on fake)'][mod][ind]
                      for ind, i in enumerate(losses_av['Discriminator (on real)'][mod])]
        plt.plot(epoch_vector, sum_losses, linestyle='-', color=mod_color)
        this_legend.append(mod + " (TOTAL)")
    plt.legend(this_legend)
    plt.title("Discriminator loss (fake + real)")
    plt.xlabel("Iteration")
    plt.ylabel("Loss value")
    plt.savefig(os.path.join(savedir, 'training_losses.png'))
    plt.close()
def plot_Histogram(realT1, fakeT1, realFLAIR, fakeFLAIR, save_name):
    kl_value = {}
    realT1 = realT1.flatten()
    fakeT1 = fakeT1.flatten()
    realFLAIR = realFLAIR.flatten()
    fakeFLAIR = fakeFLAIR.flatten()
    realT1 = realT1[realT1 >= 1]
    fakeT1 = fakeT1[fakeT1 >= 1]
    realFLAIR = realFLAIR[realFLAIR >= 1]
    fakeFLAIR = fakeFLAIR[fakeFLAIR >= 1]
    plt.figure(figsize=(15, 4))
    plt.subplot(1, 2, 1)
    plt.hist(realT1.flatten(), alpha=0.8, color='lightgreen', label="Real", bins=75)
    plt.hist(fakeT1.flatten(), alpha=0.4, color='dodgerblue', label="Fake", bins=75)
    plt.legend()
    plt.title("T1")
    hist_T1_real, bin_edges = np.histogram(realT1.flatten(), bins=75)
    hist_T1_fake, _ = np.histogram(fakeT1.flatten(), bins=bin_edges)
    kl_value['T1'] = KL_Divergence(hist_T1_fake, hist_T1_real)
    plt.subplot(1, 2, 2)
    plt.hist(realFLAIR.flatten(), alpha=0.8, color='lightgreen', label="Real", bins=75)
    plt.hist(fakeFLAIR.flatten(), alpha=0.4, color='dodgerblue', label="Fake", bins=75)
    plt.legend()
    plt.title("FLAIR")
    hist_FLAIR_real, bin_edges = np.histogram(realFLAIR.flatten(), bins=75)
    hist_FLAIR_fake, _ = np.histogram(fakeFLAIR.flatten(), bins=bin_edges)
    kl_value['FLAIR'] = KL_Divergence(hist_FLAIR_fake, hist_FLAIR_real)
    plt.savefig(save_name)
    plt.close()
    return kl_value
def KL_Divergence(p, q, epsilon=0.000001):
    # We want probabilities, so we normalise (epsilon avoids log(0) and division by zero)
    p = np.asarray(p / sum(p), dtype=float) + epsilon
    q = np.asarray(q / sum(q), dtype=float) + epsilon
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
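# Illustrative sketch (hypothetical data, not part of the original pipeline):
# KL_Divergence takes two histograms over the SAME bin edges (raw counts are fine,
# they are normalised internally) and returns KL(p || q), which is not symmetric.
def _demo_kl():
    p, edges = np.histogram(np.random.normal(0.0, 1.0, 10000), bins=50, range=(-4, 4))
    q, _ = np.histogram(np.random.normal(0.5, 1.0, 10000), bins=edges)
    return KL_Divergence(p, q), KL_Divergence(q, p)  # The two directions differ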
def plotTrainingLosses_1dec(loss_file, smooth=False, *args):
    if len(args) > 0:
        savedir = args[0]
    else:
        savedir = loss_file.split('/')
        savedir = "/".join(savedir[:-1])
    losses = {}  # All iterations in one epoch
    losses_av = {}  # Per epoch
    epoch_vector = []
    iter_vector = []
    modalities = {"T1": 'darkblue', "FLAIR": 'teal'}
    better_names = {'KLD': "KLD Loss", "GAN": "Total GAN Loss", "VGG": "VGG Loss",
                    "GAN_Feat": "Feature Loss (GAN)", "D_Fake": "Discriminator (on fake)",
                    "D_real": "Discriminator (on real)", "coherence": "NMI inter-sequence loss",
                    "D_acc_fakes": "Discrim. Acc. (Fakes)", "D_acc_reals": "Discrim. Acc. (Reals)",
                    "D_acc_total": "Discrim. Acc.", "mod_disc": "Modality Discrimination"}
    with open(loss_file, 'r') as loss_f:
        lines = loss_f.readlines()
    last_epoch = 0
    for ind_line, line in enumerate(lines):
        if '(epoch' in line:
            splitline = line.split('(')
            splitline = splitline[-1]
            splitline = splitline.split(')')
            epoch, iter = process_Epoch_Stream(splitline[0])
            iter_vector.append(epoch * iter)
            if epoch != last_epoch:
                # Before continuing to process the line, we dump the previous epoch's info
                # into losses_av, which holds the mean per epoch.
                last_epoch = epoch
                epoch_vector.append(epoch)
                # With each change of epoch, we empty the losses
                if epoch != 1:  # Because we have no data for "epoch 0"
                    # I.e. if epoch is 2, we are dumping info about epoch 1.
                    for loss_type, loss_val in losses.items():
                        if loss_type not in losses_av.keys():
                            losses_av[loss_type] = [np.mean(loss_val)]
                        else:
                            losses_av[loss_type].append(np.mean(loss_val))
                    losses = {}
            splitline = splitline[-1]  # We keep the losses
            losses_line = splitline.split(";")
            losses_line = losses_line[:-1]  # The last item is a linebreak
            for loss in losses_line:
                loss_sp = loss.split(':')
                loss_id = loss_sp[0]  # Loss ID
                loss_id = loss_id.replace(" ", "")
                loss_sp = loss_sp[-1]
                loss_sp = loss_sp.split(" ")  # Split on spaces
                if loss_sp[0] == "":
                    del loss_sp[0]
                try:
                    # We use the nicer name when it's in the dictionary
                    # (a KeyError sends us to the fallback below).
                    if better_names[loss_id] not in losses.keys():
                        losses[better_names[loss_id]] = []
                    loss_id = better_names[loss_id]
                except KeyError:
                    if loss_id not in losses.keys():
                        losses[loss_id] = []  # Otherwise we keep the raw name
                losses[loss_id].append(float(loss_sp[0]))
    # Last epoch processing
    for loss_type, loss_val in losses.items():
        if loss_type not in losses_av.keys():
            losses_av[loss_type] = [np.mean(loss_val)]
        else:
            losses_av[loss_type].append(np.mean(loss_val))
    losses = {}
    # Make sure all the loss vectors have the same length (zero-pad the front).
    max_len = max([len(v) for k, v in losses_av.items()])
    for k, v in losses_av.items():
        n_z = max_len - len(v)
        losses_av[k] = [0] * n_z + v
    # Both discriminator terms go in the same plot, and so do the accuracies!
    f = plt.figure(figsize=(30, 20))
    # Number of losses
    n_losses = len(losses_av.keys()) - 1  # Discriminators go together, accuracies as well
    # Number of subplots
    n_plots = int(np.ceil(np.sqrt(n_losses)))
    plot_ID = 1
    for key, lossval in losses_av.items():
        if 'Discriminator' in key or "Acc" in key:
            continue  # Plotted jointly below
        plt.subplot(n_plots, n_plots, plot_ID)
        new_epoch_vector = np.arange(0, len(lossval), 1)
        if smooth:
            filtered = sm.nonparametric.lowess(lossval, new_epoch_vector, frac=0.1, return_sorted=False)
            plt.plot(new_epoch_vector, filtered, ':', color='teal')
        else:
            plt.plot(new_epoch_vector, lossval, ':', color='teal')
        plt.title(key, fontsize=22)
        plt.xlabel("Iteration", fontsize=16)
        plt.ylabel("Loss value", fontsize=16)
        plot_ID = plot_ID + 1
    # We plot the discriminator losses together.
    plt.subplot(n_plots, n_plots, plot_ID)
    new_epoch_vector = np.arange(0, len(losses_av['Discriminator (on fake)']), 1)
    if smooth:
        filtered_1 = sm.nonparametric.lowess(losses_av['Discriminator (on fake)'], new_epoch_vector,
                                             frac=0.1, return_sorted=False)
        filtered_2 = sm.nonparametric.lowess(losses_av['Discriminator (on real)'], new_epoch_vector,
                                             frac=0.1, return_sorted=False)
        plt.plot(new_epoch_vector, filtered_1, ':', color='teal')
        plt.plot(new_epoch_vector, filtered_2, ':', color='firebrick')
    else:
        plt.plot(new_epoch_vector, losses_av['Discriminator (on fake)'], ':', color='teal')
        plt.plot(new_epoch_vector, losses_av['Discriminator (on real)'], ':', color='firebrick')
    plt.title('Discriminator loss', fontsize=22)
    plt.xlabel("Iteration", fontsize=16)
    plt.ylabel("Loss value", fontsize=16)
    plot_ID += 1
    # We plot the discriminator accuracies together.
    plt.subplot(n_plots, n_plots, plot_ID)
    new_epoch_vector = np.arange(0, len(losses_av['Discrim. Acc. (Fakes)']), 1)
    if smooth:
        filtered_fakes = sm.nonparametric.lowess(losses_av['Discrim. Acc. (Fakes)'], new_epoch_vector,
                                                 frac=0.1, return_sorted=False)
        filtered_reals = sm.nonparametric.lowess(losses_av['Discrim. Acc. (Reals)'], new_epoch_vector,
                                                 frac=0.1, return_sorted=False)
        filtered_totals = sm.nonparametric.lowess(losses_av['Discrim. Acc.'], new_epoch_vector,
                                                  frac=0.1, return_sorted=False)
        plt.plot(new_epoch_vector, filtered_fakes, color='teal', alpha=0.5)
        plt.plot(new_epoch_vector, filtered_reals, color='firebrick', alpha=0.5)
        plt.plot(new_epoch_vector, filtered_totals, '-', color='grey', linewidth=2)
    else:
        plt.plot(new_epoch_vector, losses_av['Discrim. Acc. (Fakes)'], color='teal', alpha=0.5)
        plt.plot(new_epoch_vector, losses_av['Discrim. Acc. (Reals)'], color='firebrick', alpha=0.5)
        plt.plot(new_epoch_vector, losses_av['Discrim. Acc.'], '-', color='grey', linewidth=2)
    plt.title('Discriminator Accuracy', fontsize=22)
    plt.xlabel("Iteration", fontsize=16)
    plt.ylabel("Accuracy value", fontsize=16)
    plt.legend(["On fakes", "On reals", "Total"])
    plt.subplots_adjust(hspace=0.4)
    plt.savefig(os.path.join(savedir, 'training_losses.png'))
    plt.close(f)
def saveSingleImage(img, label, path, skullstrip, denormalize=False,
                    islabel=False, create_dir=False,
                    save_as_grey=False):
    '''
    Saves a single image as a PIL image.
    :param img: Image (in torch or numpy format)
    :param label: Label corresponding to the image (for skull-stripping)
    :param path: Where you want to store it, including the file name
    :param skullstrip: Whether you want to skull-strip
    :param denormalize: Whether to map the image from [-1, 1] to [0, 255]
    :param islabel: Whether the image is a label map
    :param create_dir: Whether to create the output directory if it doesn't exist
    :param save_as_grey: Whether to save as greyscale float
    '''
    if denormalize and not islabel and not save_as_grey:
        img = (img + 1) / 2.0 * 255.0
    if skullstrip and not islabel:
        img = SkullStrip(img, label, value=img.min())
    if img.shape[0] < 4:
        img = img.permute(1, 2, 0)
    if not islabel:
        if not save_as_grey:
            img = handleSizes(img, rgb_ok=False)
            # The range is 0 to 255. We replicate the channel to RGB so that no further
            # normalisation is needed.
            img = np.stack((img, img, img), axis=2)
        else:
            img = (img - img.min()) / (img.max() - img.min())
            img = 254 * img.numpy()
            img = np.concatenate([img] * 3, -1)
    path_dir = "/".join(path.split("/")[:-1])
    if not os.path.isdir(path_dir):
        os.makedirs(path_dir)
    img = Image.fromarray(img.astype('uint8'))
    img.save(path)
def process_Epoch_Stream(line):
    """
    Processes a string of the form "epoch: XXX, iters: XXX, XXXXXX".
    :param line: String with epoch and iteration numbers.
    :return: Tuple (epoch, iteration number).
    """
    line = line.replace(" ", "")
    epoch_section = line.split("epoch:")
    epoch_section = epoch_section[-1]
    epoch_section = epoch_section.split(",", 1)
    epoch = int(epoch_section[0])
    iter_section = epoch_section[-1]
    iter_section = iter_section.split("iters:")
    iter_section = iter_section[-1]
    iter_section = iter_section.split(",")
    iters = int(iter_section[0])
    return (epoch, iters)
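# Minimal sketch of the expected input (the exact log format is assumed from the
# parsing logic above, not confirmed elsewhere in this file):
def _demo_process_epoch_stream():
    epoch, iters = process_Epoch_Stream("epoch: 3, iters: 400, time: 0.081")
    return epoch, iters  # (3, 400)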
def set_deterministic(is_deterministic: bool, random_seed: int = 0) -> None:
    """
    Author: P-Daniel Tudosiu
    Sets the torch and numpy random seeds to random_seed and controls whether CuDNN's
    behaviour is deterministic. If it is deterministic, big speed penalties might be
    incurred.
    For more details see https://pytorch.org/docs/stable/notes/randomness.html#cudnn
    Args:
        is_deterministic: Whether CuDNN behaviour is deterministic or not
        random_seed: Seed given to torch and numpy
    """
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)
    if is_deterministic:
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    else:
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
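# Typical use (sketch): fix the seeds once, before building models and dataloaders,
# so that runs are reproducible. The seed value below is arbitrary.
# set_deterministic(is_deterministic=True, random_seed=42)   # reproducible, slower CuDNN
# set_deterministic(is_deterministic=False, random_seed=42)  # seeded, but CuDNN autotunes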
def SkullStrip(image, label, value=0.0, epsilon=0.65):
    '''
    Sets everything that the label marks as background to `value`.
    :param image: image tensor (B x C x H x W)
    :param label: label tensor whose first channel encodes the background
    :param value: value given to the background voxels
    :param epsilon: threshold applied to the background channel
    :return: the skull-stripped image
    '''
    image[label[:, 0:1, ...] > epsilon] = value  # We blank out the labelled background parts of the image
    return image
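# Illustrative sketch (hypothetical tensors, assumed B x C x H x W, not part of the
# original pipeline): the label's first channel is thresholded at `epsilon` and the
# selected voxels are overwritten in place with `value`.
def _demo_skullstrip():
    image = torch.rand(1, 1, 64, 64)
    background = torch.zeros(1, 1, 64, 64)
    background[..., :8, :] = 1.0  # Hypothetical background strip along the top rows
    return SkullStrip(image, background, value=0.0)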
def adapt_size(img, size):
    '''
    Crops (or extends) an input image so that its size matches the one specified.
    :param img: Input image. Formats accepted: [X, Y, Z, C], [X, Y, C]
    :param size: List with the size per dimension
    :return: Cropped (or extended) image
    '''
    no_channels = False
    img_ = img.copy()
    if len(img_.shape) > 4:
        raise TypeError("Only three or four dimensional images supported.")
    elif len(img_.shape) == 2:
        no_channels = True
    if len(size) > 3:
        raise TypeError("Only two or three-dimensional changes supported.")
    initial_shape = img_.shape
    if no_channels:
        img_ = np.expand_dims(img_, axis=-1)
    on_each_side = []
    crop_mode = []
    for sizeitem in size:
        on_each_side.append([])
        crop_mode.append(False)
    for s_ind, s in enumerate(size):
        rem_s = size[s_ind] - img_.shape[s_ind]
        if rem_s < 0:
            crop_mode[s_ind] = True
            rem_s = img_.shape[s_ind] - size[s_ind]