@@ -542,6 +542,45 @@ def call(self, inputs):
542
542
543
543
self .assertIn ('label' , model .seen_input_keys )
544
544
545
+ @parameterized .named_parameters ([
546
+ ('sequential' , build_linear_keras_sequential_model ),
547
+ ('sequential_no_input_layer' ,
548
+ build_linear_keras_sequential_model_no_input_layer ),
549
+ ('functional' , build_linear_keras_functional_model ),
550
+ ('subclassed' , build_linear_keras_subclassed_model ),
551
+ ])
552
+ def test_train_pgd (self , model_fn ):
553
+ w = np .array ([[4.0 ], [- 3.0 ]])
554
+ x0 = np .array ([[2.0 , 3.0 ]])
555
+ y0 = np .array ([[0.0 ]])
556
+ adv_multiplier = 0.2
557
+ adv_step_size = 0.01
558
+ learning_rate = 0.01
559
+ pgd_iterations = 3
560
+ pgd_epsilon = 2.5 * adv_step_size
561
+ adv_config = configs .make_adv_reg_config (
562
+ multiplier = adv_multiplier ,
563
+ adv_step_size = adv_step_size ,
564
+ adv_grad_norm = 'infinity' ,
565
+ pgd_iterations = pgd_iterations ,
566
+ pgd_epsilon = pgd_epsilon )
567
+ y_hat = np .dot (x0 , w )
568
+ # The adversarial perturbation is constant across PGD iterations.
569
+ x_adv = x0 + pgd_epsilon * np .sign ((y_hat - y0 ) * w .T )
570
+ y_hat_adv = np .dot (x_adv , w )
571
+ grad_w_labeled_loss = 2. * (y_hat - y0 ) * x0 .T
572
+ grad_w_adv_loss = adv_multiplier * 2. * (y_hat_adv - y0 ) * x_adv .T
573
+ w_new = w - learning_rate * (grad_w_labeled_loss + grad_w_adv_loss )
574
+
575
+ inputs = {'feature' : tf .constant (x0 ), 'label' : tf .constant (y0 )}
576
+ model = model_fn (input_shape = (2 ,), weights = w )
577
+ adv_model = adversarial_regularization .AdversarialRegularization (
578
+ model , label_keys = ['label' ], adv_config = adv_config )
579
+ adv_model .compile (tf .keras .optimizers .SGD (learning_rate ), loss = 'MSE' )
580
+ adv_model .fit (x = inputs , batch_size = 1 , steps_per_epoch = 1 )
581
+
582
+ self .assertAllClose (w_new , tf .keras .backend .get_value (model .weights [0 ]))
583
+
545
584
def test_evaluate_binary_classification_metrics (self ):
546
585
# multi-label binary classification model
547
586
w = np .array ([[4.0 , 1.0 , - 5.0 ], [- 3.0 , 1.0 , 2.0 ]])
@@ -633,6 +672,30 @@ def test_perturb_on_batch_custom_config(self):
633
672
self .assertAllClose (x_adv , adv_inputs ['feature' ])
634
673
self .assertAllClose (y0 , adv_inputs ['label' ])
635
674
675
+ @parameterized .named_parameters ([
676
+ ('sequential' , build_linear_keras_sequential_model ),
677
+ ('sequential_no_input_layer' ,
678
+ build_linear_keras_sequential_model_no_input_layer ),
679
+ ('functional' , build_linear_keras_functional_model ),
680
+ ('subclassed' , build_linear_keras_subclassed_model ),
681
+ ])
682
+ def test_perturb_on_batch_pgd (self , model_fn ):
683
+ w , x0 , y0 , lr , adv_config , _ = self ._set_up_linear_regression ()
684
+ pgd_epsilon = 4.5 * adv_config .adv_neighbor_config .adv_step_size
685
+ adv_config .adv_neighbor_config .pgd_iterations = 5
686
+ adv_config .adv_neighbor_config .pgd_epsilon = pgd_epsilon
687
+ inputs = {'feature' : x0 , 'label' : y0 }
688
+ model = model_fn (input_shape = (2 ,), weights = w )
689
+ adv_model = adversarial_regularization .AdversarialRegularization (
690
+ model , label_keys = ['label' ], adv_config = adv_config )
691
+ adv_model .compile (optimizer = tf .keras .optimizers .SGD (lr ), loss = ['MSE' ])
692
+ adv_inputs = adv_model .perturb_on_batch (inputs )
693
+
694
+ y_hat = np .dot (x0 , w )
695
+ x_adv = x0 + pgd_epsilon * np .sign ((y_hat - y0 ) * w .T )
696
+ self .assertAllClose (x_adv , adv_inputs ['feature' ])
697
+ self .assertAllClose (y0 , adv_inputs ['label' ])
698
+
636
699
637
700
# Delegate to the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  tf.test.main()
0 commit comments