diff --git a/datasets/eval_query.txt b/datasets/eval_query.txt
index cb6a97d..fa57bcc 100644
--- a/datasets/eval_query.txt
+++ b/datasets/eval_query.txt
@@ -16,4 +16,4 @@
 10000129_1_CTce_ThAb.mat
 10000136_1_CTce_ThAb.mat
 10000110_1_CTce_ThAb.mat
-10000130_1_CTce_ThAb.mat
+10000105_1_CTce_ThAb.mat
diff --git a/datasets/eval_support.txt b/datasets/eval_support.txt
index 6de01b3..ea11171 100644
--- a/datasets/eval_support.txt
+++ b/datasets/eval_support.txt
@@ -1 +1 @@
-10000105_1_CTce_ThAb.mat
\ No newline at end of file
+10000130_1_CTce_ThAb.mat
diff --git a/run_oneshot.py b/run_oneshot.py
index e91a993..64b38c3 100644
--- a/run_oneshot.py
+++ b/run_oneshot.py
@@ -47,7 +47,7 @@ def train(train_params, common_params, data_params, net_params):
     train_data, test_data = load_data(data_params)

     folds = ['fold1']
-    model_prefix = 'model6_Focal_loss_noClsLastDec_'
+    model_prefix = 'model6_Focal_gamma2_after3epoch_'

     for fold in folds:
         final_model_path = os.path.join(common_params['save_model_dir'], model_prefix + fold + '.pth.tar')
@@ -121,7 +121,7 @@ def evaluate(eval_params, net_params, data_params, common_params, train_params):
     model_name = 'model6_Dice_L2_loss_target_fold1.pth.tar'
     folds = ['fold1']

-    eval_model_path1 = "saved_models/model6_Dice_L2_loss_target_fold1.pth.tar"
+    eval_model_path1 = "saved_models/model6_Focal_gamma2_after3epoch_fold1.pth.tar"
     eval_model_path2 = "saved_models/model6_coronal_fold1.pth.tar"
     eval_model_path3 = "saved_models/model6_sagittal_fold1.pth.tar"

diff --git a/settings.ini b/settings.ini
index 472ae27..f41d3af 100644
--- a/settings.ini
+++ b/settings.ini
@@ -37,7 +37,7 @@ exp_name = "model6_fold1"
 final_model_file = "model6_fold1_DiceL2.pth.tar"
 learning_rate = 1e-1
 momentum = 0.95
-train_batch_size = 6
+train_batch_size = 8
 val_batch_size = 5
 log_nth = 10
 num_epochs = 20
@@ -64,4 +64,4 @@ support_txt_file = "datasets/eval_support.txt"
 remap_config = "WholeBody"
 #Valid options : COR, AXI, SAG
 orientation = "AXI"
-save_predictions_dir = "predictions_1view_DiceL2_target"
\ No newline at end of file
+save_predictions_dir = "predictions_Focal_gamma2_after3epoch"
\ No newline at end of file
diff --git a/solver_oneshot_multiOpti_auto.py b/solver_oneshot_multiOpti_auto.py
index 3b731c8..3db1ff6 100644
--- a/solver_oneshot_multiOpti_auto.py
+++ b/solver_oneshot_multiOpti_auto.py
@@ -50,11 +50,11 @@ def __init__(self,
         #     self.optim = optim(model.parameters(), **optim_args)

         self.optim_c = optim(
-            [{'params': model.conditioner.parameters(), 'lr': 1e-15, 'momentum': 0.95, 'weight_decay': 0.001}
+            [{'params': model.conditioner.parameters(), 'lr': 1e-2, 'momentum': 0.95, 'weight_decay': 0.0001}
             ], **optim_args)

         self.optim_s = optim(
-            [{'params': model.segmentor.parameters(), 'lr': 1e-15, 'momentum': 0.95, 'weight_decay': 0.001}
+            [{'params': model.segmentor.parameters(), 'lr': 1e-2, 'momentum': 0.95, 'weight_decay': 0.0001}
             ], **optim_args)

         # self.scheduler = lr_scheduler.StepLR(self.optim, step_size=5,
@@ -153,7 +153,7 @@ def train(self, train_loader, test_loader):
                 if model.is_cuda:
                     condition_input, query_input, y2, y1 = condition_input.cuda(self.device,
-                                                                                non_blocking=True), query_input.cuda(
+                                                                                 non_blocking=True), query_input.cuda(
                         self.device, non_blocking=True), y2.cuda(
                         self.device, non_blocking=True), y1.cuda(
                         self.device, non_blocking=True)

@@ -165,13 +165,14 @@
                 # space_w, channel_w = weights
                 # e_w1, e_w2, e_w3, bn_w, d_w3, d_w2, d_w1, cls_w = space_w
                 # e_c1, e_c2, e_c3, bn_c, d_c3, d_c2, d_c1, cls_c = channel_w
-                e_w1, e_w2, e_w3, bn_w, d_w3, d_w2, d_w1, cls_w = weights
-                weights = [e_w1, e_w2, e_w3, bn_w, d_w3, d_w2, None, None]
+                # e_w1, e_w2, e_w3, bn_w, d_w3, d_w2, d_w1, cls_w = weights
+                # weights = [e_w1, e_w2, e_w3, bn_w, d_w3, d_w2, d_w1, cls_w]
                 # channel_w = [e_c1, e_c2, e_c3, bn_c, d_c3, d_c2, d_c1, cls_c]
                 # weights = (space_w, channel_w)

                 output = model.segmentor(query_input, weights)  # TODO: add weights
-                loss = self.loss_func(F.softmax(output, dim=1), y2, y1)
+                cost_weight = (1, 0) if epoch < 3 else (0, 1)
+                loss = self.loss_func(F.softmax(output, dim=1), y2, weight=cost_weight)
                 optim_s.zero_grad()
                 optim_c.zero_grad()
                 loss.backward()
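
Note on the loss change in solver_oneshot_multiOpti_auto.py: the new cost_weight tuple hands the loss over from its first term to its second after epoch 3, which, together with the 'model6_Focal_gamma2_after3epoch' prefix, suggests self.loss_func blends a Dice term and a focal term (gamma=2). Below is a minimal sketch of a loss with that interface; the name CombinedDiceFocalLoss and the exact Dice/focal formulations are assumptions for illustration, since the repo's actual loss module is not part of this diff.

    import torch
    import torch.nn as nn


    class CombinedDiceFocalLoss(nn.Module):
        """Weighted sum of a soft-Dice term and a focal term (hypothetical sketch).

        weight=(1, 0) trains on Dice only; weight=(0, 1) on focal only,
        matching cost_weight = (1, 0) if epoch < 3 else (0, 1) in train().
        """

        def __init__(self, gamma=2.0, eps=1e-7):
            super().__init__()
            self.gamma = gamma
            self.eps = eps

        def forward(self, probs, target, weight=(1.0, 1.0)):
            # probs: (N, C, H, W) softmax output; target: (N, H, W) long class indices
            one_hot = torch.zeros_like(probs).scatter_(1, target.unsqueeze(1), 1.0)

            # Soft Dice averaged over classes and batch
            inter = (probs * one_hot).sum(dim=(2, 3))
            union = probs.sum(dim=(2, 3)) + one_hot.sum(dim=(2, 3))
            dice = 1.0 - ((2.0 * inter + self.eps) / (union + self.eps)).mean()

            # Focal term with gamma=2 on the probability of the true class
            pt = (probs * one_hot).sum(dim=1).clamp(min=self.eps)
            focal = (-((1.0 - pt) ** self.gamma) * pt.log()).mean()

            return weight[0] * dice + weight[1] * focal

With this interface, loss = self.loss_func(F.softmax(output, dim=1), y2, weight=cost_weight) trains the Dice term alone for the first three epochs and then switches entirely to the focal term.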