Merge pull request #19 from ivadomed/um/ivadomed_scseg_training
`ivadomed` configs for training spinal cord segmentation
uzaymacar authored Dec 23, 2021
2 parents 40576c0 + c8359b1 commit 0990668
Showing 2 changed files with 156 additions and 3 deletions.
148 changes: 148 additions & 0 deletions config/seg_sc.json
@@ -0,0 +1,148 @@
{
    "command": "train",
    "gpu_ids": [0],
    "path_output": "seg_sc_output",
    "model_name": "seg_sc_model",
    "debugging": true,
    "object_detection_params": {
        "object_detection_path": null,
        "safety_factor": [1.0, 1.0, 1.0]
    },
    "loader_parameters": {
        "path_data": ["basel-mp2rage-preprocessed/data_processed_scseg"],
        "subject_selection": {
            "n": [],
            "metadata": [],
            "value": []
        },
        "target_suffix": ["_seg-manual"],
        "extensions": [],
        "roi_params": {
            "suffix": null,
            "slice_filter_roi": null
        },
        "contrast_params": {
            "training_validation": ["UNIT1"],
            "testing": ["UNIT1"],
            "balance": {}
        },
        "slice_filter_params": {
            "filter_empty_mask": false,
            "filter_empty_input": false
        },
        "slice_axis": "sagittal",
        "multichannel": false,
        "soft_gt": false
    },
    "split_dataset": {
        "fname_split": null,
        "random_seed": 42,
        "center_test": [],
        "method": "per_patient",
        "balance": null,
        "train_fraction": 0.75,
        "test_fraction": 0.2
    },
    "training_parameters": {
        "batch_size": 2,
        "loss": {
            "name": "DiceLoss"
        },
        "training_time": {
            "num_epochs": 200,
            "early_stopping_patience": 50,
            "early_stopping_epsilon": 0.001
        },
        "scheduler": {
            "initial_lr": 1e-3,
            "lr_scheduler": {
                "name": "CosineAnnealingLR",
                "base_lr": 1e-5,
                "max_lr": 1e-3
            }
        },
        "balance_samples": {
            "applied": false,
            "type": "gt"
        },
        "mixup_alpha": null,
        "transfer_learning": {
            "retrain_model": null,
            "retrain_fraction": 1.0,
            "reset": true
        }
    },
    "default_model": {
        "name": "Unet",
        "dropout_rate": 0.3,
        "bn_momentum": 0.1,
        "depth": 4,
        "is_2d": false,
        "final_activation": "sigmoid"
    },
    "uncertainty": {
        "epistemic": false,
        "aleatoric": false,
        "n_it": 0
    },
    "postprocessing": {"binarize_prediction": {"thr": 0.5}},
    "evaluation_parameters": {},
    "transformation": {
        "Resample": {
            "wspace": 1.0,
            "hspace": 1.0,
            "dspace": 1.0
        },
        "CenterCrop": {
            "size": [256, 256, 256]
        },
        "RandomReverse": {
            "applied_to": ["im", "gt"],
            "dataset_type": ["training"]
        },
        "RandomAffine": {
            "degrees": 10,
            "scale": [0.2, 0.2, 0.2],
            "translate": [0.0, 0.0, 0.0],
            "applied_to": ["im", "gt"],
            "dataset_type": ["training"]
        },
        "ElasticTransform": {
            "alpha_range": [25.0, 35.0],
            "sigma_range": [3.5, 5.5],
            "p": 0.5,
            "applied_to": ["im", "gt"],
            "dataset_type": ["training"]
        },
        "RandomGamma": {
            "log_gamma_range": [-1.0, 1.0],
            "p": 0.5,
            "applied_to": ["im"],
            "dataset_type": ["training"]
        },
        "RandomBiasField": {
            "coefficients": 0.5,
            "order": 3,
            "p": 0.3,
            "applied_to": ["im"],
            "dataset_type": ["training"]
        },
        "RandomBlur": {
            "sigma_range": [0.0, 2.0],
            "p": 0.3,
            "applied_to": ["im"],
            "dataset_type": ["training"]
        },
        "NumpyToTensor": {},
        "NormalizeInstance": {
            "applied_to": ["im"]
        }
    },
    "Modified3DUNet": {
        "applied": true,
        "length_3D": [256, 256, 256],
        "stride_3D": [256, 256, 256],
        "attention": false,
        "n_filters": 8
    }
}
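
Since the config sets "command": "train", the file is meant to be consumed directly by ivadomed. The snippet below is a minimal, illustrative sanity check of this config before launching a run; the relative path "config/seg_sc.json" and the specific assertions are assumptions for the sketch, not part of this commit.

import json

# Load the training config added in this commit (path assumed relative to the repo root)
with open("config/seg_sc.json") as f:
    cfg = json.load(f)

# In this config, the 3D patch fed to Modified3DUNet matches the CenterCrop size
assert cfg["transformation"]["CenterCrop"]["size"] == cfg["Modified3DUNet"]["length_3D"]

# Train/test fractions leave 5% of subjects for validation (0.75 + 0.2 < 1.0)
split = cfg["split_dataset"]
assert split["train_fraction"] + split["test_fraction"] < 1.0

print("contrasts:", cfg["loader_parameters"]["contrast_params"]["training_validation"])
print("loss:", cfg["training_parameters"]["loss"]["name"])
print("epochs:", cfg["training_parameters"]["training_time"]["num_epochs"])

# Training itself would typically be launched through the ivadomed CLI, e.g.
# ivadomed -c config/seg_sc.json   (exact flags depend on the installed ivadomed version)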
11 changes: 8 additions & 3 deletions preprocessing/qc_preprocess.py
@@ -30,7 +30,7 @@
 subjects = subjects_df['participant_id'].values.tolist()

 # Log resolutions and sizes for data exploration
-resolutions, sizes = [], []
+resolutions, sizes, crop_sizes = [], [], []

 # Log problematic subjects for QC
 failed_crop_subjects, shape_mismatch_subjects, left_out_lesion_subjects = [], [], []
@@ -41,18 +41,22 @@
     subject_images_path = os.path.join(args.sct_output_path, 'data_processed', subject, 'anat')
     subject_labels_path = os.path.join(args.sct_output_path, 'data_processed', 'derivatives', 'labels', subject, 'anat')

-    # Read cropped subject image (i.e. 3D volume) to be used for training
+    # Read original and cropped subject image (i.e. 3D volume) to be used for training
+    img_path = os.path.join(subject_images_path, '%s_UNIT1.nii.gz' % subject)
     img_crop_fpath = os.path.join(subject_images_path, '%s_UNIT1_crop.nii.gz' % subject)
     if not os.path.exists(img_crop_fpath):
         failed_crop_subjects.append(subject)
         continue
+    img = nib.load(img_path)
     img_crop = nib.load(img_crop_fpath)

     # Get and log size and resolution for each subject image
-    size = img_crop.get_fdata().shape
+    size = img.get_fdata().shape
+    crop_size = img_crop.get_fdata().shape
     resolution = tuple(img_crop.header['pixdim'].tolist()[1:4])
     resolution = tuple([np.round(r, 1) for r in list(resolution)])
     sizes.append(size)
+    crop_sizes.append(crop_size)
     resolutions.append(resolution)

     # Read original and cropped subject ground-truths (GT)
@@ -78,6 +82,7 @@

 print('RESOLUTIONS: ', Counter(resolutions))
 print('SIZES: ', Counter(sizes))
+print('CROP SIZES: ', Counter(crop_sizes))

 print('Could not find cropped image for the following subjects: ', failed_crop_subjects)
 print('Found shape mismatch in images and GTs for the following subjects: ', shape_mismatch_subjects)
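The QC change above is pure bookkeeping: the original and the cropped volume are both loaded, and their shapes are tallied separately alongside the rounded voxel resolutions. A standalone sketch of that logic for a single subject follows; the file names are hypothetical and nibabel/numpy are assumed to be installed.

import numpy as np
import nibabel as nib
from collections import Counter

sizes, crop_sizes, resolutions = [], [], []

# Hypothetical original / cropped volume pair for one subject
for img_path, crop_path in [("sub-01_UNIT1.nii.gz", "sub-01_UNIT1_crop.nii.gz")]:
    img = nib.load(img_path)        # original volume
    img_crop = nib.load(crop_path)  # cropped volume used for training
    sizes.append(img.get_fdata().shape)
    crop_sizes.append(img_crop.get_fdata().shape)
    # Voxel resolution (mm) from the NIfTI header, rounded to one decimal
    resolutions.append(tuple(np.round(r, 1) for r in img_crop.header['pixdim'][1:4]))

print('RESOLUTIONS: ', Counter(resolutions))
print('SIZES: ', Counter(sizes))
print('CROP SIZES: ', Counter(crop_sizes))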
