@@ -469,127 +469,110 @@ def loss():
    )


-@test_utils.run_all_in_graph_and_eager_modes
-class ConditionalGradientTest(tf.test.TestCase):
-    def testMinimizeWith2DIndiciesForEmbeddingLookupNuclear(self):
-        # This test invokes the ResourceSparseApplyConditionalGradient
-        # operation.
-        var0 = tf.Variable(tf.ones([2, 2]))
+@pytest.mark.usefixtures("maybe_run_functions_eagerly")
+def test_minimize_with_2D_indicies_for_embedding_lookup_nuclear():
+    # This test invokes the ResourceSparseApplyConditionalGradient
+    # operation.
+    var0 = tf.Variable(tf.ones([2, 2]))

-        def loss():
-            return tf.math.reduce_sum(tf.nn.embedding_lookup(var0, [[1]]))
+    def loss():
+        return tf.math.reduce_sum(tf.nn.embedding_lookup(var0, [[1]]))

-        # the gradient for this loss function:
-        grads0 = tf.constant([[0, 0], [1, 1]], dtype=tf.float32)
-        top_singular_vector0 = cg_lib.ConditionalGradient._top_singular_vector(grads0)
+    # the gradient for this loss function:
+    grads0 = tf.constant([[0, 0], [1, 1]], dtype=tf.float32)
+    top_singular_vector0 = cg_lib.ConditionalGradient._top_singular_vector(grads0)

-        learning_rate = 0.1
-        lambda_ = 0.1
-        ord = "nuclear"
-        opt = cg_lib.ConditionalGradient(
-            learning_rate=learning_rate, lambda_=lambda_, ord=ord
-        )
-        cg_op = opt.minimize(loss, var_list=[var0])
-        self.evaluate(tf.compat.v1.global_variables_initializer())
-
-        # Run 1 step of cg_op
-        self.evaluate(cg_op)
-        top_singular_vector0 = self.evaluate(top_singular_vector0)
-        self.evaluate(var0)
-        self.assertAllCloseAccordingToType(
-            [
-                learning_rate * 1
-                - (1 - learning_rate) * lambda_ * top_singular_vector0[1][0],
-                learning_rate * 1
-                - (1 - learning_rate) * lambda_ * top_singular_vector0[1][1],
-            ],
-            self.evaluate(var0[1]),
-        )
+    learning_rate = 0.1
+    lambda_ = 0.1
+    ord = "nuclear"
+    opt = cg_lib.ConditionalGradient(
+        learning_rate=learning_rate, lambda_=lambda_, ord=ord
+    )
+    _ = opt.minimize(loss, var_list=[var0])

-    def testTensorLearningRateAndConditionalGradientFrobenius(self):
-        for dtype in [tf.half, tf.float32, tf.float64]:
-            with self.cached_session():
-                var0 = tf.Variable([1.0, 2.0], dtype=dtype)
-                var1 = tf.Variable([3.0, 4.0], dtype=dtype)
-                grads0 = tf.constant([0.1, 0.1], dtype=dtype)
-                grads1 = tf.constant([0.01, 0.01], dtype=dtype)
-                norm0 = tf.math.reduce_sum(grads0 ** 2) ** 0.5
-                norm1 = tf.math.reduce_sum(grads1 ** 2) ** 0.5
-                ord = "fro"
-                cg_opt = cg_lib.ConditionalGradient(
-                    learning_rate=tf.constant(0.5), lambda_=tf.constant(0.01), ord=ord
-                )
-                cg_update = cg_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-                if not tf.executing_eagerly():
-                    self.evaluate(tf.compat.v1.global_variables_initializer())
-                    # Fetch params to validate initial values
-                    self.assertAllClose([1.0, 2.0], self.evaluate(var0))
-                    self.assertAllClose([3.0, 4.0], self.evaluate(var1))
+    # Run 1 step of cg_op
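+    # Note: the assertion below checks the expected result of one
+    # nuclear-norm CG step on the initial value 1, i.e.
+    #     lr * 1 - (1 - lr) * lambda_ * top_singular_vector0[1][i]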
+    test_utils.assert_allclose_according_to_type(
+        [
+            learning_rate * 1
+            - (1 - learning_rate) * lambda_ * top_singular_vector0[1][0],
+            learning_rate * 1
+            - (1 - learning_rate) * lambda_ * top_singular_vector0[1][1],
+        ],
+        var0[1],
+    )

-                # Check we have slots
-                self.assertEqual(["conditional_gradient"], cg_opt.get_slot_names())
-                slot0 = cg_opt.get_slot(var0, "conditional_gradient")
-                self.assertEquals(slot0.get_shape(), var0.get_shape())
-                slot1 = cg_opt.get_slot(var1, "conditional_gradient")
-                self.assertEquals(slot1.get_shape(), var1.get_shape())

-                if not tf.executing_eagerly():
-                    self.assertFalse(slot0 in tf.compat.v1.trainable_variables())
-                    self.assertFalse(slot1 in tf.compat.v1.trainable_variables())
+@pytest.mark.usefixtures("maybe_run_functions_eagerly")
+@pytest.mark.parametrize("dtype", [tf.half, tf.float32, tf.float64])
+def test_tensor_learning_rate_and_conditional_gradient_frobenius(dtype):
+    var0 = tf.Variable([1.0, 2.0], dtype=dtype)
+    var1 = tf.Variable([3.0, 4.0], dtype=dtype)
+    grads0 = tf.constant([0.1, 0.1], dtype=dtype)
+    grads1 = tf.constant([0.01, 0.01], dtype=dtype)
+    norm0 = tf.math.reduce_sum(grads0 ** 2) ** 0.5
+    norm1 = tf.math.reduce_sum(grads1 ** 2) ** 0.5
+    ord = "fro"
+    cg_opt = cg_lib.ConditionalGradient(
+        learning_rate=tf.constant(0.5), lambda_=tf.constant(0.01), ord=ord
+    )
+    _ = cg_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+
+    # Check we have slots
+    assert ["conditional_gradient"] == cg_opt.get_slot_names()
+    slot0 = cg_opt.get_slot(var0, "conditional_gradient")
+    assert slot0.get_shape() == var0.get_shape()
+    slot1 = cg_opt.get_slot(var1, "conditional_gradient")
+    assert slot1.get_shape() == var1.get_shape()
+
+    # Check that the parameters have been updated.
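+    # One Frobenius-norm CG step rescales each parameter as
+    #     var <- lr * var - (1 - lr) * lambda_ * grad / ||grad||_F
+    # with lr = 0.5 and lambda_ = 0.01, which the two checks below spell out.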
+    test_utils.assert_allclose_according_to_type(
+        np.array(
+            [
+                1.0 * 0.5 - (1 - 0.5) * 0.01 * 0.1 / norm0,
+                2.0 * 0.5 - (1 - 0.5) * 0.01 * 0.1 / norm0,
+            ]
+        ),
+        var0.numpy(),
+    )
+    test_utils.assert_allclose_according_to_type(
+        np.array(
+            [
+                3.0 * 0.5 - (1 - 0.5) * 0.01 * 0.01 / norm1,
+                4.0 * 0.5 - (1 - 0.5) * 0.01 * 0.01 / norm1,
+            ]
+        ),
+        var1.numpy(),
+    )
+    # Step 2: the conditional_gradient contains the
+    # previous update.
+    cg_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+    # Check that the parameters have been updated.
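+    # The second step applies the same rule to the already-updated values,
+    # so the expectations below nest the step-1 result inside the formula:
+    #     var <- (step-1 var) * lr - (1 - lr) * lambda_ * grad / ||grad||_F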
+    test_utils.assert_allclose_according_to_type(
+        np.array(
+            [
+                (1.0 * 0.5 - (1 - 0.5) * 0.01 * 0.1 / norm0) * 0.5
+                - (1 - 0.5) * 0.01 * 0.1 / norm0,
+                (2.0 * 0.5 - (1 - 0.5) * 0.01 * 0.1 / norm0) * 0.5
+                - (1 - 0.5) * 0.01 * 0.1 / norm0,
+            ]
+        ),
+        var0.numpy(),
+    )
+    test_utils.assert_allclose_according_to_type(
+        np.array(
+            [
+                (3.0 * 0.5 - (1 - 0.5) * 0.01 * 0.01 / norm1) * 0.5
+                - (1 - 0.5) * 0.01 * 0.01 / norm1,
+                (4.0 * 0.5 - (1 - 0.5) * 0.01 * 0.01 / norm1) * 0.5
+                - (1 - 0.5) * 0.01 * 0.01 / norm1,
+            ]
+        ),
+        var1.numpy(),
+    )

-                if not tf.executing_eagerly():
-                    self.evaluate(cg_update)
-                # Check that the parameters have been updated.
-                norm0 = self.evaluate(norm0)
-                norm1 = self.evaluate(norm1)
-                self.assertAllCloseAccordingToType(
-                    np.array(
-                        [
-                            1.0 * 0.5 - (1 - 0.5) * 0.01 * 0.1 / norm0,
-                            2.0 * 0.5 - (1 - 0.5) * 0.01 * 0.1 / norm0,
-                        ]
-                    ),
-                    self.evaluate(var0),
-                )
-                self.assertAllCloseAccordingToType(
-                    np.array(
-                        [
-                            3.0 * 0.5 - (1 - 0.5) * 0.01 * 0.01 / norm1,
-                            4.0 * 0.5 - (1 - 0.5) * 0.01 * 0.01 / norm1,
-                        ]
-                    ),
-                    self.evaluate(var1),
-                )
-                # Step 2: the conditional_gradient contain the
-                # previous update.
-                if tf.executing_eagerly():
-                    cg_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-                else:
-                    self.evaluate(cg_update)
-                # Check that the parameters have been updated.
-                self.assertAllCloseAccordingToType(
-                    np.array(
-                        [
-                            (1.0 * 0.5 - (1 - 0.5) * 0.01 * 0.1 / norm0) * 0.5
-                            - (1 - 0.5) * 0.01 * 0.1 / norm0,
-                            (2.0 * 0.5 - (1 - 0.5) * 0.01 * 0.1 / norm0) * 0.5
-                            - (1 - 0.5) * 0.01 * 0.1 / norm0,
-                        ]
-                    ),
-                    self.evaluate(var0),
-                )
-                self.assertAllCloseAccordingToType(
-                    np.array(
-                        [
-                            (3.0 * 0.5 - (1 - 0.5) * 0.01 * 0.01 / norm1) * 0.5
-                            - (1 - 0.5) * 0.01 * 0.01 / norm1,
-                            (4.0 * 0.5 - (1 - 0.5) * 0.01 * 0.01 / norm1) * 0.5
-                            - (1 - 0.5) * 0.01 * 0.01 / norm1,
-                        ]
-                    ),
-                    self.evaluate(var1),
-                )

+@test_utils.run_all_in_graph_and_eager_modes
+class ConditionalGradientTest(tf.test.TestCase):
     def _dbParamsFrobeniusCG01(self):
         """Return dist-belief conditional_gradient values.